├── .cartridge.yml ├── .editorconfig ├── .github └── workflows │ ├── publish.yaml │ └── test_on_push.yaml ├── .gitignore ├── .luacheckrc ├── .luacov ├── CHANGELOG.md ├── LICENSE ├── Makefile ├── README.md ├── deps.sh ├── doc └── assets │ └── config-loader.png ├── migrations-scm-1.rockspec ├── migrator ├── config-loader.lua ├── directory-loader.lua ├── init.lua ├── utils.lua └── version.lua ├── test ├── entrypoint │ └── check_roles_enabled_init.lua ├── helper.lua ├── helper │ ├── integration.lua │ └── utils.lua ├── init.lua ├── integration │ ├── basic_test.lua │ ├── check_roles_enabled_test.lua │ ├── console_run_test.lua │ ├── fockups_test.lua │ ├── get_applied_test.lua │ ├── migrations-gh-65 │ │ └── 001_create_func.lua │ ├── migrations-gh-66 │ │ └── 01_first.lua │ ├── migrations │ │ ├── 01_first.lua │ │ ├── 02_second.lua │ │ └── 03_sharded.lua │ ├── migrations_check_roles_enabled │ │ └── 01_first.lua │ ├── move_migrations_state_test.lua │ ├── new_replicaset_test.lua │ ├── no_cartridge_ddl_test.lua │ └── upgrade_test.lua └── unit │ ├── check_roles_enabled_test.lua │ ├── config_loader_test.lua │ ├── directory_loader_test.lua │ └── migrations │ └── positive │ ├── 01_first.lua │ └── 02_second.lua └── tmp └── .keep /.cartridge.yml: -------------------------------------------------------------------------------- 1 | --- 2 | run_dir: 'tmp' 3 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | # top-most EditorConfig file 2 | root = true 3 | 4 | # Unix-style newlines with a newline ending every file 5 | [*] 6 | end_of_line = lf 7 | insert_final_newline = true 8 | 9 | [CMakeLists.txt] 10 | indent_style = space 11 | indent_size = 4 12 | 13 | [*.cmake] 14 | indent_style = space 15 | indent_size = 4 16 | 17 | [*.lua] 18 | indent_style = space 19 | indent_size = 4 20 | 21 | [*.{h,c,cc}] 22 | indent_style = tab 23 | tab_width = 8 24 | -------------------------------------------------------------------------------- /.github/workflows/publish.yaml: -------------------------------------------------------------------------------- 1 | name: Publish 2 | 3 | on: 4 | push: 5 | branches: [master] 6 | tags: ['*'] 7 | 8 | jobs: 9 | version-check: 10 | # We need this job to run only on push with tag. 
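# A push event fires for both branch and tag pushes; the startsWith() condition below keeps only tag pushes.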
11 | if: ${{ github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') }} 12 | runs-on: ubuntu-20.04 13 | steps: 14 | - name: Check module version 15 | uses: tarantool/actions/check-module-version@master 16 | with: 17 | module-name: 'migrator' 18 | 19 | publish-scm-1: 20 | if: github.ref == 'refs/heads/master' 21 | runs-on: ubuntu-20.04 22 | steps: 23 | - uses: actions/checkout@v2 24 | - uses: tarantool/rocks.tarantool.org/github-action@master 25 | with: 26 | auth: ${{ secrets.ROCKS_AUTH }} 27 | files: migrations-scm-1.rockspec 28 | 29 | publish-tag: 30 | if: startsWith(github.ref, 'refs/tags/') 31 | needs: version-check 32 | runs-on: ubuntu-20.04 33 | steps: 34 | - uses: actions/checkout@v3 35 | 36 | - uses: tarantool/setup-tarantool@v2 37 | with: 38 | tarantool-version: '2.10' 39 | - run: echo $PWD/.rocks/bin >> $GITHUB_PATH 40 | 41 | # Make a release 42 | - run: echo TAG=${GITHUB_REF##*/} >> $GITHUB_ENV 43 | - run: tarantoolctl rocks new_version --tag ${{ env.TAG }} 44 | - run: tarantoolctl rocks install migrations-${{ env.TAG }}-1.rockspec 45 | - run: tarantoolctl rocks pack migrations ${{ env.TAG }} 46 | 47 | - uses: tarantool/rocks.tarantool.org/github-action@master 48 | with: 49 | auth: ${{ secrets.ROCKS_AUTH }} 50 | files: | 51 | migrations-${{ env.TAG }}-1.rockspec 52 | migrations-${{ env.TAG }}-1.all.rock 53 | -------------------------------------------------------------------------------- /.github/workflows/test_on_push.yaml: -------------------------------------------------------------------------------- 1 | name: Run tests 2 | 3 | on: [push] 4 | 5 | jobs: 6 | all: 7 | runs-on: ubuntu-20.04 8 | container: 9 | image: centos:7 10 | timeout-minutes: 10 11 | env: 12 | DOWNLOAD_TOKEN: ${{ secrets.DOWNLOAD_TOKEN }} 13 | ROCKS_USERNAME: ${{ secrets.ROCKS_USERNAME }} 14 | ROCKS_PASSWORD: ${{ secrets.ROCKS_PASSWORD }} 15 | steps: 16 | - name: Install centos packages 17 | run: | 18 | yum -y install epel-release https://repo.ius.io/ius-release-el7.rpm 19 | yum -y update 20 | yum -y install wget git cmake make unzip gcc gcc-c++ 21 | - name: Checkout sources 22 | uses: actions/checkout@v3 23 | - name: Install sdk 24 | run: make sdk 25 | - name: Install rocks 26 | shell: bash 27 | run: source sdk/env.sh && make .rocks 28 | - name: Run tests 29 | run: make test 30 | 31 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .rocks 2 | .swo 3 | .swp 4 | CMakeCache.txt 5 | CMakeFiles 6 | cmake_install.cmake 7 | *.dylib 8 | *.idea 9 | __pycache__ 10 | *pyc 11 | .cache 12 | .pytest_cache 13 | .vagrant 14 | .DS_Store 15 | *.xlog 16 | *.snap 17 | *.rpm 18 | *.deb 19 | *.tar.gz 20 | node_modules 21 | /tmp/* 22 | !/tmp/.keep 23 | .vscode 24 | .history 25 | sdk 26 | -------------------------------------------------------------------------------- /.luacheckrc: -------------------------------------------------------------------------------- 1 | include_files = {'**/*.lua', '*.luacheckrc', '*.rockspec'} 2 | exclude_files = {'.rocks/', 'tmp/', 'sdk/', '.history/'} 3 | max_line_length = 150 4 | -------------------------------------------------------------------------------- /.luacov: -------------------------------------------------------------------------------- 1 | statsfile = 'tmp/luacov.stats.out' 2 | reportfile = 'tmp/luacov.report.out' 3 | exclude = { 4 | 'test/', 5 | } 6 | -------------------------------------------------------------------------------- /CHANGELOG.md: 
-------------------------------------------------------------------------------- 1 | # Changelog 2 | All notable changes to this project will be documented in this file. 3 | 4 | The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) 5 | and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). 6 | 7 | ## [1.0.2] 8 | 9 | ### Fixed: 10 | - State space bootstrap on cluster rolling upgrade (gh-77) 11 | 12 | ## [1.0.1] 13 | 14 | ### Added: 15 | - BSD License. 16 | 17 | ## [1.0.0] 18 | 19 | ### Fixed: 20 | - Migrations do not apply to newly added replica sets in the cluster (gh-65). 21 | Applied migration names are moved from the cluster-wide configuration to 22 | the space on each node. 23 | 24 | ### Added: 25 | - An API for moving existing migration names from the cluster configuration to 26 | a space. 27 | - An API for getting the list of applied migrations for the cluster. 28 | 29 | ## [0.7.0] 30 | ### Added: 31 | - `utils.check_roles_enabled` helper function 32 | to check whether roles are enabled on the instance (gh-68) 33 | 34 | ## [0.6.0] 35 | ### Added: 36 | - Configurable timeout for storage migrations (gh-66) 37 | ### Fixed: 38 | - Running tests with Tarantool 2.11+ 39 | - Running tests with tarantool/http 1.2.0+ (gh-63) 40 | 41 | ## [0.5.0] 42 | ### Added: 43 | - Versioning support 44 | 45 | ## [0.4.2] 46 | ### Fixed: 47 | - Fetch the schema from a replicaset leader to apply to the clusterwide config even 48 | when `migrations.up()` is called on a replica (gh-56). The local schema on 49 | the replica may not be up to date due to replication lag. 50 | - Issue a warning to the log when `register_sharding_key()` is called with 51 | the `{'bucket_id'}` key (gh-49). It is likely a mistake: a sharding key is a set of 52 | fields used to calculate `bucket_id`, not the `bucket_id` itself. 53 | 54 | ## [0.4.1] 55 | ### Fixed: 56 | - Unclear error output in some cases 57 | 58 | ## [0.4.0] 59 | ### Fixed: 60 | - Fix a crash during init when the instance HTTP server is disabled 61 | ### Added: 62 | - Lua API to trigger migrations from the console 63 | 64 | ## [0.3.1] 65 | ### Fixed: 66 | - Fix "fiber name is too long" for long instance names 67 | 68 | ## [0.3.0] 69 | ### Added: 70 | - config-loader to load migrations from the Cartridge clusterwide config 71 | 72 | ## [0.2.0] 73 | ### Fixed: 74 | - Fix a bug in the "second" migrations run that led to each migration being applied again and again 75 | 76 | ## [0.1.0] 77 | ### Added: 78 | - Basic functionality 79 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2020-2024 Tarantool. 2 | 3 | Redistribution and use in source and binary forms, with or without modification, 4 | are permitted provided that the following conditions are met: 5 | 6 | 1. Redistributions of source code must retain the above copyright notice, this 7 | list of conditions and the following disclaimer. 8 | 9 | 2. Redistributions in binary form must reproduce the above copyright notice, 10 | this list of conditions and the following disclaimer in the documentation 11 | and/or other materials provided with the distribution. 12 | 13 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 14 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 15 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 16 | DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR 17 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 18 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 19 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 20 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 21 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 22 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 23 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | version := scm-1 2 | 3 | .PHONY: all doc test schema install 4 | 5 | BUNDLE_VERSION=2.8.4-0-g47e6bd362-r508 6 | COMMIT_TAG = $(shell git describe) 7 | 8 | all: doc 9 | mkdir -p doc 10 | 11 | centos-packages: 12 | yum -y install epel-release && yum -y update && yum -y install wget git cmake make unzip 13 | 14 | sdk: Makefile 15 | wget https://tarantool:$(DOWNLOAD_TOKEN)@download.tarantool.io/enterprise/tarantool-enterprise-bundle-$(BUNDLE_VERSION).tar.gz 16 | tar -xzf tarantool-enterprise-bundle-$(BUNDLE_VERSION).tar.gz 17 | rm tarantool-enterprise-bundle-$(BUNDLE_VERSION).tar.gz 18 | mv tarantool-enterprise sdk 19 | 20 | .rocks: migrations-scm-1.rockspec 21 | $(shell) ./deps.sh 22 | 23 | lint: 24 | .rocks/bin/luacheck . 25 | 26 | test: lint 27 | rm -f luacov* 28 | .rocks/bin/luatest --verbose --shuffle all --coverage 29 | .rocks/bin/luacov . && grep -A999 '^Summary' tmp/luacov.report.out 30 | 31 | push-scm-1: 32 | curl --fail -X PUT -F "rockspec=@migrations-scm-1.rockspec" https://${ROCKS_USERNAME}:${ROCKS_PASSWORD}@rocks.tarantool.org 33 | 34 | push-release: 35 | cd release/ \ 36 | && curl --fail -X PUT -F "rockspec=@migrations-${COMMIT_TAG}-1.rockspec" https://${ROCKS_USERNAME}:${ROCKS_PASSWORD}@rocks.tarantool.org \ 37 | && curl --fail -X PUT -F "rockspec=@migrations-${COMMIT_TAG}-1.all.rock" https://${ROCKS_USERNAME}:${ROCKS_PASSWORD}@rocks.tarantool.org 38 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Migrations manager for Tarantool Cartridge 2 | 3 | @lookup README.md 4 | 5 | The migrations module allows you to run cluster-wide migrations for your data. 6 | 7 | It stores the list of applied migrations in the `_migrations` space on each node and applies the resulting schema to cartridge `ddl`. 8 | 9 | ## Usage 10 | 11 | 1) Add the `migrations` dependency: 12 | ```lua 13 | -- <app-name>-scm-1.rockspec 14 | dependencies = { 15 | ... 16 | 'migrations == <version>-1', 17 | ... 18 | } 19 | ``` 20 | 21 | 2) Add `migrator` to the list of cartridge roles in `init.lua`: 22 | ```lua 23 | -- init.lua 24 | .... 25 | cartridge.cfg({ 26 | roles = { 27 | 'migrator', 28 | .... 29 | } 30 | }) 31 | ``` 32 | 33 | 3) Put migration code into the `./migrations` folder of your app. By default, the migrator loads all files from it in lexicographical order. 34 | Every migration (e.g.
`0001_create_my_sharded_space_DATETIME.lua`) should expose a single parameter-less function `up`: 35 | ```lua 36 | return { 37 | up = function() 38 | local utils = require('migrator.utils') 39 | local f = box.schema.create_space('my_sharded_space', { 40 | format = { 41 | { name = 'key', type = 'string' }, 42 | { name = 'bucket_id', type = 'unsigned' }, 43 | { name = 'value', type = 'any', is_nullable = true } 44 | }, 45 | if_not_exists = true, 46 | }) 47 | f:create_index('primary', { 48 | parts = { 'key' }, 49 | if_not_exists = true, 50 | }) 51 | f:create_index('bucket_id', { 52 | parts = { 'bucket_id' }, 53 | if_not_exists = true, 54 | unique = false 55 | }) 56 | utils.register_sharding_key('my_sharded_space', {'key'}) 57 | return true 58 | end 59 | } 60 | ``` 61 | 62 | 4) Call `curl -X POST http://<host>:<port>/migrations/up` once you are ready to migrate, or connect to any instance of the cluster and call `require('migrator').up()`. 63 | 64 | 5) What happens then: 65 | * the coordinator node (the one you sent the request to) triggers migration execution on all replicaset leaders; 66 | * each replicaset leader applies all available migrations and replies to the coordinator; 67 | * each replicaset leader stores the list of applied migrations in a space; 68 | * if all replies are successful, the coordinator applies the changes to the resulting cluster ddl-schema. 69 | 70 | 6) That's it! 71 | 72 | ## Advanced usage 73 | 74 | IMPORTANT: the code snippets below should be embedded in `init.lua` so that they take effect on all nodes of the cluster. 75 | 76 | 1) Change the directory where migrations are located: embed the following in `init.lua`: 77 | 78 | ```lua 79 | local migrator = require('migrator') 80 | local my_directory_loader = require('migrator.directory-loader').new('test/integration/migrations') 81 | migrator.set_loader(my_directory_loader) 82 | ``` 83 | 84 | 2) ... or use `migrator.config-loader` to load migrations from the Tarantool Cartridge clusterwide config. 85 | 86 | Configure `migrator` to use `config-loader`: 87 | 88 | ```lua 89 | local migrator = require('migrator') 90 | local config_loader = require('migrator.config-loader').new() 91 | migrator.set_loader(config_loader) 92 | ``` 93 | 94 | Navigate to the Cartridge web UI "Code" page to write your migrations. 95 | Migrations must be stored in `*.lua` files under the "migrations/source" key: 96 | 97 | ![config-loader example](doc/assets/config-loader.png) 98 | 99 | 3) ... or use your own loader - it should expose a single function `list(self)` which returns a similar-looking array: 100 | 101 | ```lua 102 | local my_loader = { 103 | list = function(_) 104 | return { 105 | { 106 | name = '01_first', 107 | up = function() ...
end 108 | }, 109 | } 110 | end 111 | } 112 | migrator.set_loader(my_loader) 113 | ``` 114 | 115 | 4) Disable `cartridge.ddl` usage: 116 | 117 | ```lua 118 | migrator.set_use_cartridge_ddl(false) 119 | ``` 120 | 121 | In this case, the resulting schema will not be registered via `cartridge_set_schema`. 122 | 123 | ## Utils, helpers, tips and tricks 124 | * Specify a sharding key for `cartridge.ddl` (if you use it) using `utils.register_sharding_key`: 125 | ```lua 126 | up = function() 127 | local utils = require('migrator.utils') 128 | local f = box.schema.create_space('my_sharded_space', { 129 | format = { 130 | { name = 'key', type = 'string' }, 131 | { name = 'bucket_id', type = 'unsigned' }, 132 | { name = 'value', type = 'any', is_nullable = true } 133 | }, 134 | if_not_exists = true, 135 | }) 136 | f:create_index('primary', { 137 | parts = { 'key' }, 138 | if_not_exists = true, 139 | }) 140 | f:create_index('bucket_id', { 141 | parts = { 'bucket_id' }, 142 | if_not_exists = true, 143 | unique = false 144 | }) 145 | utils.register_sharding_key('my_sharded_space', {'key'}) 146 | return true 147 | end 148 | ``` 149 | Warning! It is not correct to pass 'bucket_id' in the 'key' parameter of register_sharding_key(). 150 | The 'bucket_id' field is where the output of the sharding function is saved. 151 | 152 | * Before 0.6.0, each storage migration run was limited to 3600 seconds (#66). 153 | Migrations running longer than this limit result in a timeout error. 154 | 155 | Starting with 0.6.0, you may configure this value in the clusterwide config to 156 | allow longer migrations. The default is 3600 seconds. 157 | ```yaml 158 | migrations: 159 | options: 160 | storage_timeout: 43200 # in seconds 161 | ``` 162 | 163 | * To run migration code only when specific roles are enabled, use `utils.check_roles_enabled`: 164 | ```lua 165 | up = function() 166 | local utils = require('migrator.utils') 167 | if utils.check_roles_enabled({'vshard-storage'}) then 168 | local f = box.schema.create_space('my_sharded_space', { 169 | format = { 170 | { name = 'key', type = 'string' }, 171 | { name = 'bucket_id', type = 'unsigned' }, 172 | { name = 'value', type = 'any', is_nullable = true } 173 | }, 174 | if_not_exists = true, 175 | }) 176 | f:create_index('primary', { 177 | parts = { 'key' }, 178 | if_not_exists = true, 179 | }) 180 | f:create_index('bucket_id', { 181 | parts = { 'bucket_id' }, 182 | if_not_exists = true, 183 | unique = false 184 | }) 185 | utils.register_sharding_key('my_sharded_space', {'key'}) 186 | return true 187 | elseif utils.check_roles_enabled({'my-role'}) then 188 | my_specific_role_logic() 189 | end 190 | end 191 | ``` 192 | 193 | * To get the list of applied migrations, make a GET request to 194 | `http://<host>:<port>/migrations/applied` or call 195 | `require('migrator').get_applied()` on any cluster instance. This method returns a list of 196 | applied migrations grouped by leader node. 197 | 198 | ## Upgrade from 0.* versions. 199 | 200 | The storage method for applied migration names changed in version `1.*`: the list of applied migrations 201 | is now stored separately on each cluster node in the `_migrations` space. An additional step is required 202 | before applying migrations after an update from `0.*`: call 203 | `curl -X POST http://<host>:<port>/migrations/move_migrations_state` or connect 204 | to any instance of the cluster and call `require('migrator').move_migrations_state()`.
This method 205 | does the following: 206 | 207 | - copies applied migration names from the cluster-wide configuration to the `_migrations` space on 208 | leader nodes. 209 | - if copying succeeds on all leaders, removes the list from the cluster-wide configuration. 210 | 211 | ## Rolling back to 0.* versions. 212 | 213 | To downgrade from a `1.*` to a `0.*` version, do the following: 214 | 215 | - get a list of the applied migrations using the `get_applied` API. 216 | - set the list of migrations in the cluster-wide config: 217 | ```yaml 218 | migrations: 219 | applied: 220 | - 01_migration.lua 221 | - 02_migration.lua 222 | . . . 223 | ``` 224 | - remove the `_migrations` space and the `_migrations_id_seq` sequence on all nodes if necessary. 225 | - downgrade the `migrations` module. 226 | 227 | ## Limitations 228 | - all migrations will be run on all cluster nodes (no partial migrations); 229 | - no pre-validation of migration code (yet), so you should test it beforehand; 230 | - no support for running a single migration (yet); 231 | - no dry-run (yet); 232 | - no rolling back unsuccessful migrations (yet); 233 | - no migrating `down` (yet). 234 | -------------------------------------------------------------------------------- /deps.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # Call this script to install test dependencies 3 | 4 | set -e 5 | 6 | # Test dependencies: 7 | tarantoolctl rocks install luatest 0.5.7 8 | tarantoolctl rocks install luacov 0.13.0 9 | tarantoolctl rocks install luacheck 0.26.0 10 | tarantoolctl rocks make migrations-scm-1.rockspec 11 | -------------------------------------------------------------------------------- /doc/assets/config-loader.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tarantool/migrations/ae4d8745bb7b07234ee7fd678030ef44f1db3543/doc/assets/config-loader.png -------------------------------------------------------------------------------- /migrations-scm-1.rockspec: -------------------------------------------------------------------------------- 1 | package = 'migrations' 2 | version = 'scm-1' 3 | source = { 4 | url = 'git+https://github.com/tarantool/migrations.git'; 5 | branch = 'master'; 6 | } 7 | -- Put any modules your app depends on here 8 | dependencies = { 9 | 'tarantool', 10 | 'lua >= 5.1', 11 | 'checks >= 3.0.1-1, <4.0.0', 12 | 'cartridge >= 2.0.1-1, <3.0.0', 13 | } 14 | build = { 15 | type = 'make', 16 | build_target = 'all', 17 | install = { 18 | lua = { 19 | ['migrator'] = 'migrator/init.lua', 20 | ['migrator.version'] = 'migrator/version.lua', 21 | ['migrator.utils'] = 'migrator/utils.lua', 22 | ['migrator.directory-loader'] = 'migrator/directory-loader.lua', 23 | ['migrator.config-loader'] = 'migrator/config-loader.lua', 24 | }, 25 | }, 26 | build_variables = { 27 | version = 'scm-1', 28 | }, 29 | install_pass = false, 30 | } 31 | -------------------------------------------------------------------------------- /migrator/config-loader.lua: -------------------------------------------------------------------------------- 1 | local fio = require('fio') 2 | local checks = require('checks') 3 | local log = require('log') 4 | 5 | local Loader = {} 6 | Loader.__index = Loader 7 | 8 | local function __must_sort(m) 9 | table.sort(m, function(a, b) return a.name < b.name end) 10 | end 11 | 12 | local function assert_migration(migration) 13 | checks({ 14 | name = 'string', 15 | up = 'function', 16 | }) 17 | return migration 18
| end 19 | 20 | function Loader:list() 21 | local ca = require("cartridge.confapplier") 22 | local cfg = ca.get_active_config():get_plaintext() 23 | if cfg == nil then 24 | return {} 25 | end 26 | 27 | local result = {} 28 | 29 | for k, v in pairs(cfg) do 30 | if k:startswith(self.config_section_name) then 31 | local migration, err = loadstring(v) 32 | if migration ~= nil then 33 | local m = migration() 34 | m.name = fio.basename(k) 35 | assert_migration(m) 36 | table.insert(result, m) 37 | else 38 | log.warn('Cannot load %s: %s', v, err) 39 | end 40 | end 41 | end 42 | 43 | __must_sort(result) 44 | 45 | return result 46 | end 47 | 48 | local function new() 49 | local loader = { 50 | config_section_name = 'migrations/source', 51 | } 52 | setmetatable(loader, Loader) 53 | return loader 54 | end 55 | 56 | return { 57 | __must_sort = __must_sort, 58 | new = new, 59 | } 60 | -------------------------------------------------------------------------------- /migrator/directory-loader.lua: -------------------------------------------------------------------------------- 1 | local fio = require('fio') 2 | local checks = require('checks') 3 | local log = require('log') -- luacheck: ignore 4 | 5 | local Loader = {} 6 | Loader.__index = Loader 7 | 8 | local function assert_migration(migration) 9 | checks({ 10 | name = 'string', 11 | up = 'function', 12 | }) 13 | return migration 14 | end 15 | 16 | function Loader:list() 17 | local result = {} 18 | local search_folder = fio.pathjoin(package.searchroot(), self.dir_name) 19 | if not fio.path.is_dir(search_folder) then error(('Path %s is not valid'):format(search_folder)) end 20 | local files = fio.listdir(search_folder) or {} 21 | table.sort(files) 22 | for _, v in ipairs(files) do 23 | local migration, err = dofile(fio.pathjoin(search_folder, v)) 24 | if migration ~= nil then 25 | migration.name = v 26 | assert_migration(migration) 27 | table.insert(result, migration) 28 | else 29 | log.warn('Cannot load %s: %s', v, err) 30 | end 31 | end 32 | return result 33 | end 34 | 35 | local function new(dir_name) 36 | checks('?string') 37 | dir_name = dir_name or 'migrations' 38 | local loader = { 39 | dir_name = dir_name, 40 | } 41 | setmetatable(loader, Loader) 42 | return loader 43 | end 44 | 45 | return { 46 | new = new 47 | } 48 | 49 | 50 | -------------------------------------------------------------------------------- /migrator/init.lua: -------------------------------------------------------------------------------- 1 | local rpc = require('cartridge.rpc') 2 | local pool = require('cartridge.pool') 3 | local cartridge = require('cartridge') 4 | local confapplier = require('cartridge.confapplier') 5 | local get_topology_api = require('cartridge.lua-api.get-topology') 6 | 7 | local log = require('log') 8 | local fiber = require('fiber') 9 | local json = require('json') 10 | local checks = require('checks') 11 | local fun = require('fun') 12 | 13 | local ddl = require('ddl') 14 | 15 | local module_name = 'migrator' 16 | local vars = require('cartridge.vars').new(module_name) 17 | local migrator_error = require('errors').new_class(module_name) 18 | 19 | local utils = require('migrator.utils') 20 | vars:new('loader', require('migrator.directory-loader').new()) 21 | vars:new('use_cartridge_ddl', true) 22 | 23 | 24 | local function get_diff(applied) 25 | local to_apply = {} 26 | local migrations_map = {} 27 | for _, migration in ipairs(vars.loader:list()) do 28 | if utils.value_in(migration.name, applied) then 29 | log.verbose('%s migration is already applied', 
migration.name) 30 | else 31 | table.insert(to_apply, migration.name) 32 | migrations_map[migration.name] = migration 33 | end 34 | end 35 | return to_apply, migrations_map 36 | end 37 | 38 | local function get_schema() 39 | return ddl.get_schema() 40 | end 41 | 42 | -- since migrations might be triggered on a replica, we should fetch ddl schema from actual master 43 | -- see https://github.com/tarantool/migrations/issues/56 for details 44 | local function fetch_schema() 45 | if vars.use_cartridge_ddl ~= true then return nil end 46 | local schema, err = rpc.call('migrator', 'get_schema', nil, {prefer_local = true, leader_only = true}) 47 | if err ~= nil then 48 | log.error(err) 49 | error(err) 50 | end 51 | return schema 52 | end 53 | 54 | local DEFAULT_STORAGE_TIMEOUT = 3600 55 | 56 | local function get_storage_timeout() 57 | local config = confapplier.get_readonly('migrations') or {} 58 | local options = config['options'] or {} 59 | if options.storage_timeout ~= nil then 60 | return options.storage_timeout 61 | end 62 | return DEFAULT_STORAGE_TIMEOUT 63 | end 64 | 65 | -- Makes sure that the passed migrations match the list from the local reader. 66 | local function check_migrations_consistency(migrations_per_instance) 67 | local names = fun.iter(vars.loader:list()):map(function(m) return m.name end):totable() 68 | for host, applied in pairs(migrations_per_instance) do 69 | if utils.compare(names, applied) == false then 70 | local err_msg = string.format('Inconsistent migrations in cluster: ' .. 71 | 'expected: %s, applied on %s: %s', json.encode(names), host, json.encode(applied)) 72 | log.error(err_msg) 73 | error(err_msg) 74 | end 75 | end 76 | end 77 | 78 | -- Makes sure there is no migrations list in cluster config. 79 | local function check_no_migrations_in_config() 80 | local config = confapplier.get_readonly('migrations') 81 | if config ~= nil and config.applied ~= nil and #config.applied > 0 then 82 | error('Cannot perform an upgrade. A list of applied migrations is found in cluster ' .. 83 | 'config. Current migrator version works only with local list of applied migrations. ' .. 84 | 'Run "move_migrations_state" to move cluster-wide migrations state to local ' .. 85 | 'storage before up invocation.') 86 | end 87 | end 88 | 89 | -- Returns server alias by URI. 90 | local function get_server_alias(instance_uri) 91 | local servers = get_topology_api.get_servers(function(server) 92 | return server.uri == instance_uri 93 | end) 94 | if not servers or #servers == 0 or #servers > 1 or not servers[1].alias then 95 | return instance_uri 96 | end 97 | return servers[1].alias 98 | end 99 | 100 | local function create_space_for_storing_applied_migrations() 101 | box.schema.sequence.create('_migrations_id_seq', { if_not_exists = true }) 102 | box.schema.create_space('_migrations', { 103 | format = { 104 | {'id', type='unsigned', is_nullable=false}, 105 | {'name', type='string', is_nullable=false}, 106 | }, 107 | if_not_exists = true, 108 | }) 109 | box.space._migrations:create_index('primary', { 110 | sequence = '_migrations_id_seq', 111 | if_not_exists = true, 112 | }) 113 | -- Workaround for https://github.com/tarantool/ddl/issues/122 114 | -- If index is created by ddl, sequence is not set. Check and update is required. 
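-- (a nil sequence_id on the primary index means the sequence was not attached)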
115 | if box.space._migrations.index.primary ~= nil 116 | and box.space._migrations.index.primary.sequence_id == nil then 117 | box.space._migrations.index.primary:alter({sequence = '_migrations_id_seq'}) 118 | end 119 | end 120 | 121 | --- Run migrations on all nodes in the cluster 122 | -- Throws an exception in case of any problems 123 | -- @function up 124 | -- @return table of applied migration names grouped by host, e. g. { 125 | -- [router] = {"01_first.lua", "02_second.lua", "03_sharded.lua"} 126 | -- [s1-master] = {"03_sharded.lua"} 127 | -- } 128 | -- If no migrations applied, the table is empty. 129 | local function up() 130 | check_no_migrations_in_config() 131 | 132 | local result = {} 133 | local all_migrations = {} 134 | local fibers = {} 135 | for _, instance_uri in pairs(rpc.get_candidates('migrator', { leader_only = true })) do 136 | log.info('Preparing to run migrations on %s', instance_uri) 137 | local f = fiber.new(function() 138 | local conn = pool.connect(instance_uri) 139 | local applied_migrations, err = conn:call( 140 | '__cluster_rpc_call_local', 141 | { 'migrator', 'upgrade' }, 142 | {timeout = get_storage_timeout()}) 143 | if err ~= nil then 144 | log.warn('Cannot apply migrations on %s: %s', instance_uri, json.encode(err)) 145 | error(json.encode(err)) 146 | end 147 | local server_alias = get_server_alias(instance_uri) 148 | log.verbose('Instance %s applied migrations: %s', 149 | server_alias, json.encode(applied_migrations.applied_now)) 150 | if #applied_migrations.applied_now > 0 then 151 | result[server_alias] = applied_migrations.applied_now 152 | end 153 | all_migrations[instance_uri] = applied_migrations.applied 154 | return true 155 | end) 156 | f:set_joinable(true) 157 | f:name(instance_uri, {truncate=true}) 158 | table.insert(fibers, f) 159 | end 160 | 161 | local errors = {} 162 | for _, f in pairs(fibers) do 163 | local ok, join_result = f:join() 164 | if not ok then table.insert(errors, join_result) end 165 | end 166 | if #errors > 0 then 167 | local err_msg = string.format('Errors happened during migrations: %s', json.encode(errors)) 168 | log.error(err_msg) 169 | error(err_msg) 170 | end 171 | 172 | log.verbose('All fibers joined, results are: %s', json.encode(result)) 173 | check_migrations_consistency(all_migrations) 174 | 175 | local patch = { 176 | ['schema.yml'] = fetch_schema() 177 | } 178 | log.info('Migrations applied on all storages, changing clusterwide configuration...') 179 | log.verbose('All migrations applied successfully, changing cluster-wide configuration with a patch: %s', json.encode(patch)) 180 | 181 | local _, err = cartridge.config_patch_clusterwide(patch) 182 | if err ~= nil then 183 | log.error(err) 184 | error(err) 185 | end 186 | log.info('Migrations applied successfully!') 187 | 188 | return result 189 | end 190 | 191 | --- Get list of applied migration names on local server. 192 | -- @function get_applied_local 193 | -- @return table of applied migration names on local server. 194 | local function get_applied_local() 195 | local result = {} 196 | local counter = 0 197 | for _, migration in box.space._migrations:pairs() do 198 | table.insert(result, migration['name']) 199 | counter = counter + 1 200 | if counter >= 1000 then 201 | fiber.yield() 202 | counter = 0 203 | end 204 | end 205 | return result 206 | end 207 | 208 | --- Get list of applied migration names in cluster. 
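-- The list is gathered by calling get_applied_local on every replicaset leader.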
209 | -- Throws an exception in case of any problems 210 | -- @function get_applied 211 | -- @return table of applied migrations in cluster grouped by leader aliases. 212 | local function get_applied() 213 | local leaders = rpc.get_candidates('migrator',{ leader_only = true }) 214 | log.info('Preparing to get applied migrations from %s', json.encode(leaders)) 215 | local result, errmap = pool.map_call('_G.__cluster_rpc_call_local', 216 | {'migrator', 'get_applied_local'}, { 217 | uri_list = leaders, 218 | timeout = get_storage_timeout(), 219 | }) 220 | if errmap ~= nil then 221 | for uri, err in pairs(errmap) do 222 | log.error('Cannot get migrations state from %s: %s', 223 | uri, json.encode(err)) 224 | end 225 | error("Failed to get migrations state: " .. json.encode(errmap)) 226 | end 227 | 228 | local migrations_by_alias = {} 229 | for uri, migrations in pairs(result) do 230 | migrations_by_alias[get_server_alias(uri)] = migrations 231 | end 232 | 233 | return migrations_by_alias 234 | end 235 | 236 | -- Append migration names to the _migrations space. 237 | local function append_migrations_to_local_space(migrations) 238 | local copied_migrations = {} 239 | local local_migrations = get_applied_local() 240 | for i, migration in ipairs(migrations) do 241 | if i <= #local_migrations then 242 | if local_migrations[i] ~= migration then 243 | local err_msg = string.format('Inconsistency between cluster-wide and local ' .. 244 | 'applied migrations list: migration #%d in config: %s, in local space: %s', 245 | i, migration, local_migrations[i]) 246 | log.error(err_msg) 247 | error(err_msg) 248 | end 249 | else 250 | box.space._migrations:insert{box.NULL, migration} 251 | table.insert(copied_migrations, migration) 252 | end 253 | end 254 | log.info("Migration names copied: %s", json.encode(copied_migrations)) 255 | return copied_migrations 256 | end 257 | 258 | --- Copy the applied migrations list from the cluster-wide config to the local space on each storage. 259 | -- Throws an exception in case of any problems 260 | -- @function move_migrations_state 261 | -- @return table of copied migration names grouped by server alias. 262 | local function move_migrations_state(current_server_only) 263 | local config = confapplier.get_readonly('migrations') 264 | 265 | if config == nil or config.applied == nil or #config.applied == 0 then 266 | log.info('There are no applied migrations in cluster config. Skip moving state.') 267 | return {} 268 | end 269 | 270 | if current_server_only then 271 | return append_migrations_to_local_space(config.applied) 272 | end 273 | 274 | -- Copy state on all leaders. 275 | local leaders = rpc.get_candidates('migrator',{leader_only = true }) 276 | log.info('Preparing to copy migrations on %s', json.encode(leaders)) 277 | local result, errmap = pool.map_call('_G.__cluster_rpc_call_local', 278 | {'migrator', 'move_migrations_state', {true}}, { 279 | uri_list = leaders, 280 | timeout = get_storage_timeout(), 281 | }) 282 | if errmap ~= nil then 283 | for uri, err in pairs(errmap) do 284 | log.error('Failed to copy migrations state from cluster config on %s: %s', 285 | uri, json.encode(err)) 286 | end 287 | error("Failed to copy migrations state: " .. json.encode(errmap)) 288 | end 289 | 290 | -- Remove applied migrations from cluster-wide configuration.
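-- (patching the section with box.NULL deletes it from the clusterwide config)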
291 | local patch = { 292 | ['migrations'] = { 293 | ['applied'] = box.NULL 294 | } 295 | } 296 | log.info('Migrations are copied on all storages, removing them from clusterwide configuration...') 297 | log.verbose('Changing cluster-wide configuration with a patch: %s', json.encode(patch)) 298 | 299 | local _, err = cartridge.config_patch_clusterwide(patch) 300 | if err ~= nil then 301 | log.error(err) 302 | error(err) 303 | end 304 | log.info('Applied migrations are moved successfully: %s', json.encode(leaders)) 305 | 306 | local migrations_by_alias = {} 307 | for uri, migrations in pairs(result) do 308 | migrations_by_alias[get_server_alias(uri)] = migrations 309 | end 310 | 311 | return migrations_by_alias 312 | end 313 | 314 | local function init() 315 | local httpd = cartridge.service_get('httpd') 316 | if not httpd then return true end 317 | 318 | httpd:route({ path = '/migrations/up', method = 'POST' }, function(req) 319 | local resp = req:render({ json = { applied = up() }}) 320 | resp.status = 200 321 | return resp 322 | end) 323 | 324 | httpd:route({ path = '/migrations/move_migrations_state', method = 'POST' }, function(req) 325 | local resp = req:render({ json = { migrations_moved = move_migrations_state() }}) 326 | resp.status = 200 327 | return resp 328 | end) 329 | 330 | httpd:route({ path = '/migrations/applied', method = 'GET' }, function(req) 331 | local resp = req:render({ json = { applied = get_applied() }}) 332 | resp.status = 200 333 | return resp 334 | end) 335 | end 336 | 337 | local function upgrade() 338 | check_no_migrations_in_config() 339 | 340 | local migrations = {applied_now = {}, applied = get_applied_local()} 341 | local to_apply, migrations_map = get_diff(migrations.applied) 342 | for _, name in ipairs(to_apply) do 343 | local _, err = migrator_error:pcall(migrations_map[name].up) 344 | if err ~= nil then 345 | log.error('Migration %s not applied: %s', name, err) 346 | error(err) 347 | end 348 | box.space._migrations:insert{box.NULL, name} 349 | table.insert(migrations.applied_now, name) 350 | table.insert(migrations.applied, name) 351 | log.verbose('Migration %s applied successfully', name) 352 | end 353 | return migrations 354 | end 355 | 356 | local function set_loader(loader) 357 | checks('table') 358 | assert(type(loader.list) == 'function') 359 | vars.loader = loader 360 | end 361 | 362 | local function set_use_cartridge_ddl(use_cartridge_ddl) 363 | checks('boolean') 364 | vars.use_cartridge_ddl = use_cartridge_ddl 365 | end 366 | 367 | local function validate_config(conf_new) 368 | local migrations_conf = conf_new['migrations'] or {} 369 | local options = migrations_conf['options'] or {} 370 | 371 | if options.storage_timeout ~= nil then 372 | assert( 373 | type(options.storage_timeout) == 'number', 374 | ("'options.storage_timeout' must be a number, %s provided"):format(type(options.storage_timeout))) 375 | assert( 376 | options.storage_timeout >= 0, 377 | "'options.storage_timeout' must be a non-negative number") 378 | end 379 | 380 | return true 381 | end 382 | 383 | local function apply_config() 384 | if not box.info().ro then 385 | create_space_for_storing_applied_migrations() 386 | end 387 | 388 | return true 389 | end 390 | 391 | return { 392 | init = init, 393 | 394 | validate_config = validate_config, 395 | apply_config = apply_config, 396 | 397 | permanent = true, 398 | 399 | upgrade = upgrade, 400 | up = up, 401 | 402 | set_loader = set_loader, 403 | set_use_cartridge_ddl = set_use_cartridge_ddl, 404 | 405 | get_schema = get_schema, 406 
| 407 | move_migrations_state = move_migrations_state, 408 | get_applied = get_applied, 409 | get_applied_local = get_applied_local, 410 | 411 | _VERSION = require('migrator.version'), 412 | } 413 | -------------------------------------------------------------------------------- /migrator/utils.lua: -------------------------------------------------------------------------------- 1 | local log = require('log') 2 | 3 | local function value_in(val, arr) 4 | for i, elem in ipairs(arr) do 5 | if val == elem then 6 | return true, i 7 | end 8 | end 9 | return false 10 | end 11 | 12 | local function compare(a, b) 13 | if #a ~= #b then return false end 14 | for i, v in ipairs(a) do 15 | if b[i] ~= v then return false end 16 | end 17 | return true 18 | end 19 | 20 | 21 | -- TODO: remove this ugly hack 22 | --- 23 | --- Set fields that are used for sharding key calculation for a specified space. 24 | --- 25 | --- @param space_name string name of sharded space 26 | --- @param key table array of field names that will be used as input of sharding function 27 | --- 28 | local function register_sharding_key(space_name, key) 29 | 30 | if value_in('bucket_id', key) then 31 | log.error("Wrong sharding key: 'bucket_id' is used as input of sharding function for space '" 32 | .. space_name .. "'") 33 | end 34 | 35 | if box.space._ddl_sharding_key == nil then 36 | local sharding_space = box.schema.space.create('_ddl_sharding_key', { 37 | format = { 38 | {name = 'space_name', type = 'string', is_nullable = false}, 39 | {name = 'sharding_key', type = 'array', is_nullable = false} 40 | }, 41 | if_not_exists = true, 42 | }) 43 | sharding_space:create_index( 44 | 'space_name', { 45 | type = 'TREE', 46 | unique = true, 47 | parts = {{'space_name', 'string', is_nullable = false}}, 48 | if_not_exists = true, 49 | } 50 | ) 51 | end 52 | box.space._ddl_sharding_key:replace{space_name, key} 53 | end 54 | 55 | 56 | -- Check whether expected cartridge roles are enabled on a server. 57 | --- @param roles_list table array of role names 58 | --- 59 | local function check_roles_enabled(roles_list) 60 | local topology = require('cartridge.confapplier').get_readonly('topology') 61 | local cur_roles = topology.replicasets[box.info.cluster.uuid].roles 62 | 63 | for _, rname in pairs(roles_list) do 64 | if cur_roles[rname] == nil then 65 | return false 66 | end 67 | end 68 | 69 | return true 70 | end 71 | 72 | return { 73 | value_in = value_in, 74 | compare = compare, 75 | 76 | register_sharding_key = register_sharding_key, 77 | check_roles_enabled = check_roles_enabled, 78 | } 79 | -------------------------------------------------------------------------------- /migrator/version.lua: -------------------------------------------------------------------------------- 1 | -- Contains the module version. 2 | -- Requires manual update in case of release commit. 
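-- The publish workflow's version-check job verifies this value matches the pushed tag.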
3 | 4 | return '1.0.2' 5 | -------------------------------------------------------------------------------- /test/entrypoint/check_roles_enabled_init.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | 3 | require('strict').on() 4 | 5 | package.preload['cartridge.roles.role1-dep'] = function() 6 | return { 7 | role_name = 'cartridge.roles.role1-dep', 8 | } 9 | end 10 | 11 | package.preload['cartridge.roles.role1'] = function() 12 | return { 13 | role_name = 'cartridge.roles.role1', 14 | dependencies = {'cartridge.roles.role1-dep'}, 15 | } 16 | end 17 | 18 | package.preload['cartridge.roles.role2'] = function() 19 | return { 20 | role_name = 'cartridge.roles.role2', 21 | } 22 | end 23 | 24 | local cartridge = require('cartridge') 25 | local ok, err = cartridge.cfg({ 26 | workdir = 'tmp/db', 27 | roles = { 28 | 'cartridge.roles.role1-dep', 29 | 'cartridge.roles.role1', 30 | 'cartridge.roles.role2', 31 | 'cartridge.roles.vshard-storage', 32 | 'cartridge.roles.vshard-router', 33 | 'migrator', 34 | }, 35 | cluster_cookie = 'secret-cluster-cookie', 36 | }, { 37 | log_level = 5 38 | }) 39 | 40 | require('migrator').set_loader(require('migrator.directory-loader').new('test/integration/migrations_check_roles_enabled')) 41 | 42 | assert(ok, tostring(err)) 43 | -------------------------------------------------------------------------------- /test/helper.lua: -------------------------------------------------------------------------------- 1 | -- This file is required automatically by luatest. 2 | -- Add common configuration here. 3 | 4 | local digest = require('digest') 5 | local fio = require('fio') 6 | local t = require('luatest') 7 | 8 | local helper = {} 9 | 10 | helper.root = fio.cwd() 11 | local tmpdir = os.getenv('TMPDIR') 12 | and fio.pathjoin(os.getenv('TMPDIR'), 13 | 'migrations.' .. 
digest.base64_encode(digest.urandom(9), {urlsafe = true})) 14 | or fio.pathjoin(helper.root, 'tmp') 15 | helper.datadir = fio.pathjoin(tmpdir, 'db_test') 16 | 17 | package.setsearchroot(helper.root) 18 | 19 | helper.server_command = fio.pathjoin(helper.root, 'test', 'init.lua') 20 | 21 | t.before_suite(function() 22 | fio.rmtree(helper.datadir) 23 | fio.mktree(helper.datadir) 24 | box.cfg{} 25 | end) 26 | 27 | return helper 28 | -------------------------------------------------------------------------------- /test/helper/integration.lua: -------------------------------------------------------------------------------- 1 | local t = require('luatest') -- luacheck:ignore 2 | 3 | local shared = require('test.helper') 4 | 5 | local helper = { shared = shared } 6 | 7 | return helper 8 | -------------------------------------------------------------------------------- /test/helper/utils.lua: -------------------------------------------------------------------------------- 1 | local t = require('luatest') 2 | local luatest_utils = require('luatest.utils') 3 | 4 | local function set_sections(g, sections) 5 | return g.cluster.main_server:graphql({ query = [[ 6 | mutation($sections: [ConfigSectionInput!]) { 7 | cluster { 8 | config(sections: $sections) { 9 | filename 10 | content 11 | } 12 | } 13 | }]], 14 | variables = { sections = sections } 15 | }).data.cluster.config 16 | end 17 | 18 | local function cleanup(g) 19 | local sections = g.cluster.main_server.net_box:eval([[ 20 | return require('fun').iter( 21 | require('migrator.config-loader').new():list() 22 | ):map(function(x) return x.name end):totable() 23 | ]]) 24 | for _, name in pairs(sections) do 25 | set_sections(g, { { filename = 'migrations/source/' .. name, content = box.NULL } }) 26 | end 27 | set_sections(g, { { filename = 'schema.yml', content = box.NULL } }) 28 | 29 | g.cluster.main_server.net_box:eval([[require('cartridge').config_patch_clusterwide({migrations = {applied = box.NULL }})]]) 30 | local spaces_to_remove = { "first", "sharded" } 31 | for _, server in ipairs(g.cluster.servers) do 32 | for _, space in pairs(spaces_to_remove) do 33 | g.cluster:retrying({ timeout = 10 }, function() 34 | t.assert(server.net_box:eval([[ 35 | return (function(space) 36 | if box.cfg.read_only then 37 | return true 38 | end 39 | 40 | if box.space[space] ~= nil then 41 | return box.space[space]:drop() == nil 42 | end 43 | 44 | return true 45 | end)(...) 46 | ]], { space })) 47 | end) 48 | end 49 | 50 | -- Cleanup _migrations space. 51 | g.cluster:retrying({ timeout = 5 }, function() 52 | t.assert(server:eval([[ 53 | if box.info.ro then 54 | return true 55 | end 56 | if box.sequence._migrations_id_seq ~= nil then 57 | box.sequence._migrations_id_seq:reset() 58 | box.sequence._migrations_id_seq:set(0) 59 | end 60 | if box.space._migrations ~= nil and box.space._migrations:truncate() ~= nil then 61 | return false 62 | end 63 | return true 64 | ]])) 65 | end) 66 | g.cluster:retrying({ timeout = 5 }, function() 67 | t.assert(server:eval([[ 68 | return box.space._migrations:len() == 0 69 | ]])) 70 | end) 71 | end 72 | 73 | -- Reset loader to default. 
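-- (individual tests may switch to the config-loader, so restore the directory loader here)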
74 | for _, server in pairs(g.cluster.servers) do 75 | server:eval([[require('migrator').set_loader( 76 | require('migrator.directory-loader').new('test/integration/migrations')) 77 | ]]) 78 | end 79 | 80 | g.cluster:retrying({ timeout = 1 }, function() 81 | for _, server in pairs(g.cluster.servers) do 82 | t.assert(server.net_box:eval('return box.space.first == nil')) 83 | end 84 | end) 85 | end 86 | 87 | local function parse_module_version(str) 88 | -- https://github.com/tarantool/luatest/blob/f37b353b77be50a1f1ce87c1ff2edf0c1b96d5d1/luatest/utils.lua#L166-L173 89 | local splitstr = str:split('.') 90 | local major = tonumber(splitstr[1]:match('%d+')) 91 | local minor = tonumber(splitstr[2]:match('%d+')) 92 | local patch = tonumber(splitstr[3]:match('%d+')) 93 | return luatest_utils.version(major, minor, patch) 94 | end 95 | 96 | local function is_ddl_supports_sequences() 97 | local ddl = require('ddl') 98 | 99 | if ddl._VERSION == nil then 100 | return false 101 | end 102 | 103 | local parsed_ddl_version = parse_module_version(ddl._VERSION) 104 | local are_sequences_supported = luatest_utils.version_ge( 105 | parsed_ddl_version, 106 | luatest_utils.version(1, 7, 0) 107 | ) 108 | 109 | return are_sequences_supported 110 | end 111 | 112 | local function downgrade_ddl_schema_if_required(ddl_schema) 113 | if not is_ddl_supports_sequences then 114 | for _, space in pairs(ddl_schema.spaces) do 115 | for _, index in ipairs(space.indexes) do 116 | index.sequence = nil 117 | end 118 | end 119 | 120 | ddl_schema.sequences = nil 121 | end 122 | 123 | return ddl_schema 124 | end 125 | 126 | return { 127 | set_sections = set_sections, 128 | cleanup = cleanup, 129 | downgrade_ddl_schema_if_required = downgrade_ddl_schema_if_required, 130 | } 131 | -------------------------------------------------------------------------------- /test/init.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | 3 | require('strict').on() 4 | 5 | local cartridge = require('cartridge') 6 | local ok, err = cartridge.cfg({ 7 | workdir = 'tmp/db', 8 | roles = { 9 | 'cartridge.roles.vshard-storage', 10 | 'cartridge.roles.vshard-router', 11 | 'migrator', 12 | }, 13 | cluster_cookie = 'migrations-test-cluster-cookie', 14 | roles_reload_allowed = true 15 | }, { 16 | log_level = 6 17 | }) 18 | require('migrator').set_loader(require('migrator.directory-loader').new('test/integration/migrations')) 19 | 20 | require('json').cfg{encode_use_tostring = true,} 21 | 22 | assert(ok, tostring(err)) 23 | -------------------------------------------------------------------------------- /test/integration/basic_test.lua: -------------------------------------------------------------------------------- 1 | local t = require('luatest') 2 | local g = t.group('integration_api') 3 | local fiber = require('fiber') -- luacheck: ignore 4 | 5 | local fio = require('fio') 6 | 7 | local cartridge_helpers = require('cartridge.test-helpers') 8 | 9 | local shared = require('test.helper.integration').shared 10 | local utils = require("test.helper.utils") 11 | 12 | local datadir = fio.pathjoin(shared.datadir, 'basic') 13 | 14 | g.cluster = cartridge_helpers.Cluster:new({ 15 | server_command = shared.server_command, 16 | datadir = datadir, 17 | use_vshard = true, 18 | replicasets = { 19 | { 20 | alias = 'api', 21 | uuid = cartridge_helpers.uuid('a'), 22 | roles = { 'vshard-router' }, 23 | servers = { { instance_uuid = cartridge_helpers.uuid('a', 1) } }, 24 | }, 25 | { 26 | alias = 'storage-1', 27 | 
uuid = cartridge_helpers.uuid('b'), 28 | roles = { 'vshard-storage' }, 29 | servers = { 30 | { instance_uuid = cartridge_helpers.uuid('b', 1), env = {TARANTOOL_HTTP_ENABLED = 'false'} }, 31 | { instance_uuid = cartridge_helpers.uuid('b', 2), env = {TARANTOOL_HTTP_ENABLED = 'false'} }, 32 | }, 33 | }, 34 | { 35 | alias = 'storage-2', 36 | uuid = cartridge_helpers.uuid('c'), 37 | roles = { 'vshard-storage' }, 38 | servers = { 39 | { instance_uuid = cartridge_helpers.uuid('c', 1), env = {TARANTOOL_HTTP_ENABLED = 'false'} }, 40 | { instance_uuid = cartridge_helpers.uuid('c', 2), env = {TARANTOOL_HTTP_ENABLED = 'false'} }, 41 | }, 42 | }, 43 | }, 44 | }) 45 | 46 | g.before_all(function() g.cluster:start() end) 47 | g.after_all(function() g.cluster:stop() end) 48 | g.after_each(function() utils.cleanup(g) end) 49 | 50 | local cases = { 51 | with_config_loader = function() 52 | for _, server in pairs(g.cluster.servers) do 53 | server.net_box:eval([[ 54 | require('migrator').set_loader( 55 | require('migrator.config-loader').new() 56 | ) 57 | ]]) 58 | end 59 | 60 | local files = { "01_first.lua", "02_second.lua", "03_sharded.lua" } 61 | for _, v in ipairs(files) do 62 | local file = fio.open('test/integration/migrations/' .. v) 63 | local content = file:read() 64 | utils.set_sections(g, { { filename = "migrations/source/" .. v, content = content } }) 65 | file:close() 66 | end 67 | end, 68 | with_directory_loader = function() 69 | for _, server in pairs(g.cluster.servers) do 70 | server.net_box:eval([[ 71 | require('migrator').set_loader( 72 | require('migrator.directory-loader').new('test/integration/migrations') 73 | ) 74 | ]]) 75 | end 76 | end 77 | } 78 | 79 | for k, configure_func in pairs(cases) do 80 | g['test_basic_' .. k] = function() 81 | configure_func() 82 | 83 | for _, server in pairs(g.cluster.servers) do 84 | t.assert(server.net_box:eval('return box.space.first == nil'), server.alias) 85 | end 86 | 87 | -- gh-26 - check that httpd is disabled on some nodes 88 | t.assert_covers( 89 | g.cluster:server('storage-2-2'):http_request('get', '/', {raise = false}), 90 | {status = 595, reason = "Couldn't connect to server"} 91 | ) 92 | 93 | local main = g.cluster.main_server 94 | local result = main:http_request('post', '/migrations/up', { json = {} }) 95 | for _, server in pairs(g.cluster.servers) do 96 | -- spaces may be created with a slight delay on replicas 97 | g.cluster:retrying({ timeout = 5 }, function() 98 | t.assert_not(server.net_box:eval('return box.space.first == nil'), server.alias) 99 | end) 100 | end 101 | 102 | local expected_applied = { 103 | ["api-1"] = {"01_first.lua", "02_second.lua", "03_sharded.lua"}, 104 | ["storage-1-1"] = {"01_first.lua", "02_second.lua", "03_sharded.lua"}, 105 | ["storage-2-1"] = {"01_first.lua", "02_second.lua", "03_sharded.lua"}, 106 | } 107 | t.assert_equals(result.json, { applied = expected_applied }) 108 | 109 | local config = main:download_config() 110 | 111 | local expected_schema = { 112 | schema = { 113 | spaces = { 114 | _migrations = { 115 | engine = "memtx", 116 | format = { 117 | {is_nullable = false, name = "id", type = "unsigned"}, 118 | {is_nullable = false, name = "name", type = "string"} 119 | }, 120 | indexes = { 121 | { 122 | name = "primary", 123 | parts = {{is_nullable = false, path = "id", type = "unsigned"}}, 124 | sequence = "_migrations_id_seq", 125 | type = "TREE", 126 | unique = true, 127 | }, 128 | }, 129 | is_local = false, 130 | temporary = false, 131 | }, 132 | first = { 133 | engine = "memtx", 134 | format = { 135 | 
{ is_nullable = false, name = "key", type = "string" }, 136 | { is_nullable = true, name = "value", type = "string" }, 137 | }, 138 | indexes = { 139 | { 140 | name = "primary", 141 | parts = { { is_nullable = false, path = "key", type = "string" } }, 142 | type = "TREE", 143 | unique = true, 144 | }, 145 | { 146 | name = "value", 147 | parts = { { is_nullable = true, path = "value", type = "string" } }, 148 | type = "TREE", 149 | unique = false, 150 | }, 151 | }, 152 | is_local = false, 153 | temporary = false, 154 | }, 155 | sharded = { 156 | engine = "memtx", 157 | format = { 158 | { is_nullable = false, name = "key", type = "string" }, 159 | { is_nullable = false, name = "bucket_id", type = "unsigned" }, 160 | { is_nullable = true, name = "value", type = "any" }, 161 | }, 162 | indexes = { 163 | { 164 | name = "primary", 165 | parts = { { is_nullable = false, path = "key", type = "string" } }, 166 | type = "TREE", 167 | unique = true, 168 | }, 169 | { 170 | name = "bucket_id", 171 | parts = { { is_nullable = false, path = "bucket_id", type = "unsigned" } }, 172 | type = "TREE", 173 | unique = false, 174 | }, 175 | }, 176 | is_local = false, 177 | sharding_key = { "key" }, 178 | temporary = false, 179 | }, 180 | }, 181 | sequences = { 182 | _migrations_id_seq = { 183 | cache = 0, 184 | cycle = false, 185 | max = 9223372036854775807ULL, 186 | min = 1, 187 | start = 1, 188 | step = 1, 189 | }, 190 | }, 191 | }, 192 | } 193 | 194 | expected_schema = utils.downgrade_ddl_schema_if_required(expected_schema) 195 | t.assert_covers(config, expected_schema) 196 | 197 | result = main:http_request('post', '/migrations/up', { json = {} }) 198 | t.assert_equals(result.json, { applied = {} }) 199 | end 200 | end 201 | 202 | g.test_gh_66_configurable_timeout = function(cg) 203 | local main = g.cluster.main_server 204 | 205 | main:eval([[ 206 | require('cartridge').config_patch_clusterwide({migrations = {applied = {}, options = {storage_timeout = 0.1}}}) 207 | ]]) 208 | 209 | for _, server in pairs(cg.cluster.servers) do 210 | server.net_box:eval([[ 211 | require('migrator').set_loader( 212 | require('migrator.directory-loader').new('test/integration/migrations-gh-66') 213 | ) 214 | ]]) 215 | end 216 | 217 | local status, resp = g.cluster.main_server:eval("return pcall(function() require('migrator').up() end)") 218 | t.assert_equals(status, false) 219 | t.assert_str_contains(tostring(resp), 'Errors happened during migrations') 220 | 221 | -- Depending on Tarantool version, error message may differ. 
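-- Each candidate message is asserted under pcall; the test passes if either form matches.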
222 | local status_v1, err_v1 = pcall(function() 223 | t.assert_str_contains(tostring(resp), 'timed out') 224 | end) 225 | local status_v2, err_v2 = pcall(function() 226 | t.assert_str_contains(tostring(resp), 'Timeout exceeded') 227 | end) 228 | t.assert(status_v1 or status_v2, ("Got errors: %s, %s"):format(err_v1 and err_v1.message, err_v2 and err_v2.message)) 229 | end 230 | -------------------------------------------------------------------------------- /test/integration/check_roles_enabled_test.lua: -------------------------------------------------------------------------------- 1 | local t = require('luatest') 2 | local g = t.group('check_roles_enabled_integration') 3 | local fiber = require('fiber') -- luacheck: ignore 4 | 5 | local fio = require('fio') 6 | 7 | local cartridge_helpers = require('cartridge.test-helpers') 8 | 9 | local shared = require('test.helper.integration').shared 10 | 11 | local datadir = fio.pathjoin(shared.datadir, 'basic') 12 | 13 | g.cluster = cartridge_helpers.Cluster:new({ 14 | server_command = fio.pathjoin(shared.root, 'test', 'entrypoint', 'check_roles_enabled_init.lua'), 15 | datadir = datadir, 16 | use_vshard = true, 17 | base_advertise_port = 10400, 18 | base_http_port = 10090, 19 | replicasets = { 20 | { 21 | alias = 'api', 22 | uuid = cartridge_helpers.uuid('a'), 23 | roles = { 'vshard-router', 'migrator' }, 24 | servers = { { alias='api-master', instance_uuid = cartridge_helpers.uuid('a', 1) } }, 25 | }, 26 | { 27 | alias = 'storage-role1-role2', 28 | uuid = cartridge_helpers.uuid('b'), 29 | roles = { 'vshard-storage', 'cartridge.roles.role1', 'cartridge.roles.role2', 'migrator' }, 30 | servers = { 31 | { 32 | alias = 'storage-role1-role2-master', 33 | instance_uuid = cartridge_helpers.uuid('b', 1), 34 | env = {TARANTOOL_HTTP_ENABLED = 'false'}, 35 | }, 36 | { 37 | alias = 'storage-role1-role2-replica', 38 | instance_uuid = cartridge_helpers.uuid('b', 2), 39 | env = {TARANTOOL_HTTP_ENABLED = 'false'}, 40 | }, 41 | }, 42 | }, 43 | }, 44 | }) 45 | 46 | g.before_all(function(cg) cg.cluster:start() end) 47 | g.after_all(function(cg) cg.cluster:stop() end) 48 | 49 | g.test_check = function(cg) 50 | t.assert(cg.cluster:server('api-master'):exec(function() return require('migrator.utils').check_roles_enabled({'vshard-router'}) end)) 51 | t.assert(cg.cluster:server('api-master'):exec(function() 52 | return require('migrator.utils').check_roles_enabled({'vshard-router', 'migrator'}) 53 | end)) 54 | t.assert(cg.cluster:server('api-master'):exec(function() 55 | return require('migrator.utils').check_roles_enabled({'vshard-router', 'migrator', 'ddl-manager'}) 56 | end)) 57 | t.assert_not(cg.cluster:server('api-master'):exec(function() 58 | return require('migrator.utils').check_roles_enabled({'vshard-storage', 'migrator', 'ddl-manager'}) 59 | end)) 60 | 61 | t.assert(cg.cluster:server('storage-role1-role2-master'):exec(function() 62 | return require('migrator.utils').check_roles_enabled({'vshard-storage'}) 63 | end)) 64 | 65 | t.assert(cg.cluster:server('storage-role1-role2-master'):exec(function() 66 | return require('migrator.utils').check_roles_enabled( 67 | {'vshard-storage', 'migrator', 'ddl-manager', 'cartridge.roles.role1', 'cartridge.roles.role2', 'cartridge.roles.role1-dep'} 68 | ) 69 | end)) 70 | 71 | t.assert_not(cg.cluster:server('storage-role1-role2-master'):exec(function() 72 | return require('migrator.utils').check_roles_enabled({'vshard-router', 'migrator', 'cartridge.roles.role1-dep'}) 73 | end)) 74 | 75 | 
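-- The replica should report the same set of enabled roles as its replicaset master.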
t.assert(cg.cluster:server('storage-role1-role2-replica'):exec(function() 76 | return require('migrator.utils').check_roles_enabled({'vshard-storage'}) 77 | end)) 78 | 79 | t.assert(cg.cluster:server('storage-role1-role2-replica'):exec(function() 80 | return require('migrator.utils').check_roles_enabled( 81 | {'vshard-storage', 'migrator', 'ddl-manager', 'cartridge.roles.role1', 'cartridge.roles.role2', 'cartridge.roles.role1-dep'} 82 | ) 83 | end)) 84 | 85 | t.assert_not(cg.cluster:server('storage-role1-role2-replica'):exec(function() 86 | return require('migrator.utils').check_roles_enabled( 87 | {'vshard-router', 'migrator', 'cartridge.roles.role1-dep'} 88 | ) 89 | end)) 90 | end 91 | 92 | g.test_with_migrations = function (cg) 93 | local status, resp = cg.cluster.main_server:exec(function() return pcall(function() require('migrator').up() end) end) 94 | t.assert_equals(status, true, resp) 95 | 96 | t.assert(cg.cluster:server('api-master'):exec(function() 97 | return rawget(_G, 'vshard-router-set') or false 98 | end)) 99 | t.assert_not(cg.cluster:server('api-master'):exec(function() 100 | return rawget(_G, 'vshard-storage-set') or false 101 | end)) 102 | 103 | t.assert_not(cg.cluster:server('storage-role1-role2-master'):exec(function() 104 | return rawget(_G, 'vshard-router-set') or false 105 | end)) 106 | t.assert(cg.cluster:server('storage-role1-role2-master'):exec(function() 107 | return rawget(_G, 'vshard-storage-set') or false 108 | end)) 109 | 110 | t.assert_not(cg.cluster:server('storage-role1-role2-replica'):exec(function() 111 | return rawget(_G, 'vshard-router-set') or false 112 | end)) 113 | t.assert_not(cg.cluster:server('storage-role1-role2-replica'):exec(function() 114 | return rawget(_G, 'vshard-storage-set') or false 115 | end)) 116 | end 117 | -------------------------------------------------------------------------------- /test/integration/console_run_test.lua: -------------------------------------------------------------------------------- 1 | local t = require('luatest') 2 | local g = t.group('console_run') 3 | local fiber = require('fiber') -- luacheck: ignore 4 | 5 | local fio = require('fio') 6 | 7 | local cartridge_helpers = require('cartridge.test-helpers') 8 | 9 | local shared = require('test.helper.integration').shared 10 | local utils = require("test.helper.utils") 11 | 12 | local datadir = fio.pathjoin(shared.datadir, 'console_run') 13 | 14 | g.cluster = cartridge_helpers.Cluster:new({ 15 | server_command = shared.server_command, 16 | datadir = datadir, 17 | use_vshard = true, 18 | replicasets = { 19 | { 20 | alias = 'api', 21 | uuid = cartridge_helpers.uuid('a'), 22 | roles = { 'vshard-router' }, 23 | servers = { { instance_uuid = cartridge_helpers.uuid('a', 1) } }, 24 | }, 25 | { 26 | alias = 'storage-1', 27 | uuid = cartridge_helpers.uuid('b'), 28 | roles = { 'vshard-storage' }, 29 | servers = { 30 | { instance_uuid = cartridge_helpers.uuid('b', 1), }, 31 | { instance_uuid = cartridge_helpers.uuid('b', 2), }, 32 | }, 33 | }, 34 | { 35 | alias = 'storage-2', 36 | uuid = cartridge_helpers.uuid('c'), 37 | roles = { 'vshard-storage' }, 38 | servers = { 39 | { instance_uuid = cartridge_helpers.uuid('c', 1), }, 40 | { instance_uuid = cartridge_helpers.uuid('c', 2), }, 41 | }, 42 | }, 43 | }, 44 | }) 45 | 46 | g.before_all(function() g.cluster:start() end) 47 | g.after_all(function() g.cluster:stop() end) 48 | g.after_each(function() utils.cleanup(g) end) 49 | 50 | local cases = { 51 | with_config_loader = function() 52 | for _, server in 
pairs(g.cluster.servers) do 53 | server.net_box:eval([[ 54 | require('migrator').set_loader( 55 | require('migrator.config-loader').new() 56 | ) 57 | ]]) 58 | end 59 | 60 | local files = { "01_first.lua", "02_second.lua", "03_sharded.lua" } 61 | for _, v in ipairs(files) do 62 | local file = fio.open('test/integration/migrations/' .. v) 63 | local content = file:read() 64 | utils.set_sections(g, { { filename = "migrations/source/" .. v, content = content } }) 65 | file:close() 66 | end 67 | end, 68 | with_directory_loader = function() 69 | for _, server in pairs(g.cluster.servers) do 70 | server.net_box:eval([[ 71 | require('migrator').set_loader( 72 | require('migrator.directory-loader').new('test/integration/migrations') 73 | ) 74 | ]]) 75 | end 76 | end 77 | } 78 | 79 | for k, configure_func in pairs(cases) do 80 | g['test_run_from_console_' .. k] = function() 81 | configure_func() 82 | 83 | for _, server in pairs(g.cluster.servers) do 84 | t.assert(server.net_box:eval('return box.space.first == nil'), server.alias) 85 | end 86 | local result = g.cluster.main_server.net_box:eval('return require("migrator").up()') 87 | t.assert_equals(result, { 88 | ["api-1"] = {"01_first.lua", "02_second.lua", "03_sharded.lua"}, 89 | ["storage-1-1"] = {"01_first.lua", "02_second.lua", "03_sharded.lua"}, 90 | ["storage-2-1"] = {"01_first.lua", "02_second.lua", "03_sharded.lua"} 91 | }) 92 | g.cluster:retrying({ timeout = 5 }, function() 93 | for _, server in pairs(g.cluster.servers) do 94 | t.assert_not(server.net_box:eval('return box.space.first == nil')) 95 | end 96 | end) 97 | end 98 | end 99 | -------------------------------------------------------------------------------- /test/integration/fockups_test.lua: -------------------------------------------------------------------------------- 1 | local t = require('luatest') 2 | local g = t.group('dangerous_operations') 3 | local fiber = require('fiber') -- luacheck: ignore 4 | 5 | local fio = require('fio') 6 | 7 | local cartridge_helpers = require('cartridge.test-helpers') 8 | 9 | local shared = require('test.helper.integration').shared 10 | local utils = require("test.helper.utils") 11 | 12 | local datadir = fio.pathjoin(shared.datadir, 'basic') 13 | 14 | g.cluster = cartridge_helpers.Cluster:new({ 15 | server_command = shared.server_command, 16 | datadir = datadir, 17 | use_vshard = true, 18 | base_advertise_port = 13400, 19 | base_http_port = 8090, 20 | replicasets = { 21 | { 22 | alias = 'api', 23 | uuid = cartridge_helpers.uuid('a'), 24 | roles = { 'vshard-router' }, 25 | servers = { { instance_uuid = cartridge_helpers.uuid('a', 1) } }, 26 | }, 27 | { 28 | alias = 'storage-1', 29 | uuid = cartridge_helpers.uuid('b'), 30 | roles = { 'vshard-storage' }, 31 | servers = { 32 | { instance_uuid = cartridge_helpers.uuid('b', 1), }, 33 | { instance_uuid = cartridge_helpers.uuid('b', 2), }, 34 | }, 35 | }, 36 | }, 37 | }) 38 | 39 | g.before_all(function() g.cluster:start() end) 40 | g.after_all(function() g.cluster:stop() end) 41 | g.after_each(function() utils.cleanup(g) end) 42 | 43 | g.test_drop = function() 44 | for _, server in pairs(g.cluster.servers) do 45 | server.net_box:eval([[ 46 | require('migrator').set_loader( 47 | require('migrator.config-loader').new() 48 | ) 49 | ]]) 50 | end 51 | 52 | -- create spaces and indexes, set schema 53 | local files = { "01_first.lua", "02_second.lua", "03_sharded.lua" } 54 | for _, v in ipairs(files) do 55 | local file = fio.open('test/integration/migrations/' .. 
v) 56 | local content = file:read() 57 | utils.set_sections(g, { { filename = "migrations/source/" .. v, content = content } }) 58 | file:close() 59 | end 60 | g.cluster.main_server:http_request('post', '/migrations/up', { json = {} }) 61 | 62 | -- drop an existing index separately, check that new schema is applied successfully 63 | utils.set_sections(g, { { filename = "migrations/source/04_drop_index.lua", content = [[ 64 | return { 65 | up = function() 66 | box.space.first.index.value:drop() 67 | end 68 | } 69 | ]] } }) 70 | 71 | g.cluster.main_server:http_request('post', '/migrations/up', { json = {} }) 72 | for _, server in pairs(g.cluster.servers) do 73 | t.assert(server.net_box:eval('return box.space.first.index.value == nil')) 74 | end 75 | 76 | utils.set_sections(g, { { filename = "migrations/source/05_drop_space.lua", content = [[ 77 | return { 78 | up = function() 79 | box.space.first:drop() 80 | end 81 | } 82 | ]] } }) 83 | 84 | -- drop a space, check that new schema is applied successfully 85 | g.cluster.main_server:http_request('post', '/migrations/up', { json = {} }) 86 | for _, server in pairs(g.cluster.servers) do 87 | t.assert(server.net_box:eval('return box.space.first == nil')) 88 | end 89 | 90 | utils.set_sections(g, { { filename = "migrations/source/06_change_format.lua", content = [[ 91 | return { 92 | up = function() 93 | box.space.sharded:format({ 94 | { name = 'key', type = 'string' }, 95 | { name = 'bucket_id', type = 'unsigned' }, 96 | { name = 'value', type = 'any', is_nullable = true }, 97 | { name = 'external_id', type = 'string', is_nullable = true } 98 | }) 99 | end 100 | } 101 | ]] } }) 102 | 103 | -- change space format, check that new schema is applied successfully 104 | g.cluster.main_server:http_request('post', '/migrations/up', { json = {} }) 105 | for _, server in pairs(g.cluster.servers) do 106 | t.assert_equals(server.net_box:eval('return box.space.sharded:format()')[4], { name = 'external_id', type = 'string', is_nullable = true }) 107 | end 108 | end 109 | 110 | g.test_error_in_migrations = function() 111 | for _, server in pairs(g.cluster.servers) do 112 | server.net_box:eval([[ 113 | require('migrator').set_loader( 114 | require('migrator.config-loader').new() 115 | ) 116 | ]]) 117 | end 118 | 119 | utils.set_sections(g, { { filename = "migrations/source/101_error.lua", content = [[ 120 | return { 121 | up = function() 122 | error('Oops') 123 | end 124 | } 125 | ]] } }) 126 | 127 | local status, resp = g.cluster.main_server:eval("return pcall(function() require('migrator').up() end)") 128 | t.assert_equals(status, false) 129 | t.assert_str_contains(tostring(resp), 'Oops') 130 | t.assert_str_contains(tostring(resp), 'Errors happened during migrations') 131 | end 132 | 133 | g.test_inconsistent_migrations = function() 134 | for _, server in pairs(g.cluster.servers) do 135 | server.net_box:eval([[ 136 | require('migrator').set_loader({ 137 | list = function() return {} end 138 | }) 139 | ]]) 140 | end 141 | g.cluster.main_server.net_box:eval([[ 142 | require('migrator').set_loader({ 143 | list = function(_) 144 | return { 145 | { 146 | name = '102_local', 147 | up = function() return true end 148 | }, 149 | } 150 | end 151 | }) 152 | ]]) 153 | 154 | local status, resp = g.cluster.main_server:eval("return pcall(function() require('migrator').up() end)") 155 | t.assert_equals(status, false) 156 | t.assert_str_contains(tostring(resp), 'Inconsistent migrations in cluster: ' 157 | .. 
'expected: [\"102_local\"],') 158 | end 159 | 160 | g.test_reload = function() 161 | for _, server in pairs(g.cluster.servers) do 162 | local e_get_routes_cnt = [[ 163 | local httpd = require('cartridge').service_get('httpd') 164 | return table.maxn(httpd.routes) 165 | ]] 166 | local routes_count = server.net_box:eval(e_get_routes_cnt) 167 | local ok, err = server.net_box:eval([[ 168 | return require("cartridge.roles").reload() 169 | ]]) 170 | t.assert_equals({ ok, err }, { true, nil }) 171 | t.assert_equals(server.net_box:eval(e_get_routes_cnt), routes_count) 172 | end 173 | end 174 | 175 | -- https://github.com/tarantool/migrations/issues/56 176 | g.test_up_on_replica = function() 177 | for _, server in pairs(g.cluster.servers) do 178 | server.net_box:eval([[ 179 | require('migrator').set_loader( 180 | require('migrator.config-loader').new() 181 | ) 182 | ]]) 183 | end 184 | 185 | -- create some space 186 | g.cluster.main_server:http_request('post', '/migrations/up', { json = {} }) 187 | utils.set_sections(g, { { filename = "migrations/source/100_create_space.lua", content = [[ 188 | return { 189 | up = function() 190 | local f = box.schema.create_space('somespace', { 191 | format = { 192 | { name = 'key', type = 'string' }, 193 | { name = 'value', type = 'string', is_nullable = true } 194 | }, 195 | if_not_exists = true, 196 | }) 197 | f:create_index('primary', { 198 | parts = { 'key' }, 199 | if_not_exists = true, 200 | }) 201 | end 202 | } 203 | ]] } }) 204 | g.cluster.main_server:http_request('post', '/migrations/up', { json = {} }) 205 | 206 | fiber.sleep(0.5) 207 | 208 | -- inject schema replication delay 209 | g.cluster:server('storage-1-2').net_box:eval([[ 210 | box.space._space:before_replace(function(old, new) os.execute('sleep 0.5'); return new end) 211 | ]]) 212 | 213 | -- change space format to make ddl schema incompatible 214 | utils.set_sections(g, { { filename = "migrations/source/101_alter_space.lua", content = [[ 215 | return { 216 | up = function() 217 | box.space.somespace:format({ 218 | { name = 'key', type = 'string' }, 219 | { name = 'value', type = 'string', is_nullable = true }, 220 | { name = 'secondvalue', type = 'string', is_nullable = true } 221 | }) 222 | end 223 | } 224 | ]] } }) 225 | g.cluster:server('storage-1-2'):http_request('post', '/migrations/up', { json = {} }) 226 | end 227 | 228 | g.test_up_clusterwide_applied_migrations_exist = function(cg) 229 | local main = cg.cluster.main_server 230 | -- Simulate previous version configuration. 
231 | local _, err = main:eval([[ 232 | require('cartridge').config_patch_clusterwide({ 233 | migrations = { 234 | applied = { '001.lua', '002.lua' } 235 | } 236 | }) 237 | ]]) 238 | t.assert_not(err) 239 | 240 | local status, resp = main:eval([[ return pcall(require('migrator').up) ]]) 241 | t.assert_not(status) 242 | t.assert_str_contains(tostring(resp), 'A list of applied migrations is found in cluster config') 243 | end 244 | 245 | g.after_each(function() 246 | g.cluster:server('storage-1-2').net_box:eval([[ 247 | local f = box.space._space:before_replace() 248 | box.space._space:before_replace(nil, f[1]) 249 | ]]) 250 | end) 251 | -------------------------------------------------------------------------------- /test/integration/get_applied_test.lua: -------------------------------------------------------------------------------- 1 | local t = require('luatest') 2 | local g = t.group('get_migrations_state') 3 | 4 | local fio = require('fio') 5 | 6 | local cartridge_helpers = require('cartridge.test-helpers') 7 | local shared = require('test.helper.integration').shared 8 | local utils = require("test.helper.utils") 9 | local datadir = fio.pathjoin(shared.datadir, 'get_migrations_state') 10 | 11 | g.before_all(function() 12 | g.cluster = cartridge_helpers.Cluster:new({ 13 | server_command = shared.server_command, 14 | datadir = datadir, 15 | use_vshard = true, 16 | base_advertise_port = 10500, 17 | replicasets = { 18 | { 19 | alias = 'router', 20 | uuid = cartridge_helpers.uuid('a'), 21 | roles = { 'vshard-router', 'migrator' }, 22 | servers = { { 23 | alias = 'router', 24 | instance_uuid = cartridge_helpers.uuid('a', 1) 25 | } }, 26 | }, 27 | { 28 | alias = 'storage-1', 29 | uuid = cartridge_helpers.uuid('b'), 30 | roles = { 'vshard-storage', 'migrator' }, 31 | servers = { 32 | { 33 | alias = 'storage-1-master', 34 | instance_uuid = cartridge_helpers.uuid('b', 1), 35 | env = {TARANTOOL_HTTP_ENABLED = 'false'}, 36 | }, 37 | { 38 | alias = 'storage-1-replica', 39 | instance_uuid = cartridge_helpers.uuid('b', 2), 40 | env = {TARANTOOL_HTTP_ENABLED = 'false'}, 41 | }, 42 | }, 43 | }, 44 | { 45 | alias = 'storage-2', 46 | uuid = cartridge_helpers.uuid('c'), 47 | roles = { 'vshard-storage', 'migrator' }, 48 | servers = { 49 | { 50 | alias = 'storage-2-master', 51 | instance_uuid = cartridge_helpers.uuid('c', 1), 52 | env = {TARANTOOL_HTTP_ENABLED = 'false'}, 53 | }, 54 | { 55 | alias = 'storage-2-replica', 56 | instance_uuid = cartridge_helpers.uuid('c', 2), 57 | env = {TARANTOOL_HTTP_ENABLED = 'false'}, 58 | }, 59 | }, 60 | }, 61 | }, 62 | }) 63 | 64 | g.cluster:start() 65 | end) 66 | 67 | g.after_all(function() 68 | g.cluster:stop() 69 | fio.rmtree(g.cluster.datadir) 70 | end) 71 | 72 | g.after_each(function() utils.cleanup(g) end) 73 | 74 | g.test_get_migrations_state = function(cg) 75 | local main = cg.cluster.main_server 76 | 77 | local status, resp = main:eval("return pcall(require('migrator').up)") 78 | t.assert(status, tostring(resp)) 79 | t.assert_equals(resp, { 80 | ['router'] = { '01_first.lua', '02_second.lua', '03_sharded.lua' }, 81 | ['storage-1-master'] = { '01_first.lua', '02_second.lua', '03_sharded.lua' }, 82 | ['storage-2-master'] = { '01_first.lua', '02_second.lua', '03_sharded.lua' }, 83 | }) 84 | 85 | status, resp = main:eval("return pcall(require('migrator').get_applied)") 86 | t.assert(status, tostring(resp)) 87 | t.assert_equals(resp, { 88 | ['router'] = { '01_first.lua', '02_second.lua', '03_sharded.lua' }, 89 | ['storage-1-master'] = { '01_first.lua', 
'02_second.lua', '03_sharded.lua' }, 90 | ['storage-2-master'] = { '01_first.lua', '02_second.lua', '03_sharded.lua' }, 91 | }) 92 | 93 | -- Check the same result is returned by http. 94 | local result = main:http_request('get', '/migrations/applied') 95 | local expected_applied = { 96 | ['router'] = { '01_first.lua', '02_second.lua', '03_sharded.lua' }, 97 | ['storage-1-master'] = { '01_first.lua', '02_second.lua', '03_sharded.lua' }, 98 | ['storage-2-master'] = { '01_first.lua', '02_second.lua', '03_sharded.lua' }, 99 | } 100 | t.assert_equals(result.json, { applied = expected_applied }) 101 | end 102 | -------------------------------------------------------------------------------- /test/integration/migrations-gh-65/001_create_func.lua: -------------------------------------------------------------------------------- 1 | return { 2 | up = function() 3 | box.schema.func.create('sum', { 4 | body = [[ function(a, b) return a + b end ]] 5 | }) 6 | return true 7 | end 8 | } 9 | -------------------------------------------------------------------------------- /test/integration/migrations-gh-66/01_first.lua: -------------------------------------------------------------------------------- 1 | return { 2 | up = function() 3 | local fiber = require('fiber') 4 | fiber.sleep(5) 5 | return true 6 | end 7 | } 8 | -------------------------------------------------------------------------------- /test/integration/migrations/01_first.lua: -------------------------------------------------------------------------------- 1 | return { 2 | up = function() 3 | local f = box.schema.create_space('first', { 4 | format = { 5 | { name = 'key', type = 'string' }, 6 | { name = 'value', type = 'string', is_nullable = true } 7 | }, 8 | if_not_exists = true, 9 | }) 10 | f:create_index('primary', { 11 | parts = { 'key' }, 12 | if_not_exists = true, 13 | }) 14 | return true 15 | end 16 | } 17 | -------------------------------------------------------------------------------- /test/integration/migrations/02_second.lua: -------------------------------------------------------------------------------- 1 | return { 2 | up = function() 3 | box.space.first:create_index('value', { 4 | parts = { 'value' }, 5 | unique = false, 6 | if_not_exists = true, 7 | }) 8 | end 9 | } 10 | -------------------------------------------------------------------------------- /test/integration/migrations/03_sharded.lua: -------------------------------------------------------------------------------- 1 | local utils = require('migrator.utils') 2 | 3 | return { 4 | up = function() 5 | local f = box.schema.create_space('sharded', { 6 | format = { 7 | { name = 'key', type = 'string' }, 8 | { name = 'bucket_id', type = 'unsigned' }, 9 | { name = 'value', type = 'any', is_nullable = true } 10 | }, 11 | if_not_exists = true, 12 | }) 13 | f:create_index('primary', { 14 | parts = { 'key' }, 15 | if_not_exists = true, 16 | }) 17 | f:create_index('bucket_id', { 18 | parts = { 'bucket_id' }, 19 | if_not_exists = true, 20 | unique = false 21 | }) 22 | utils.register_sharding_key('sharded', {'key'}) 23 | return nil 24 | end 25 | } 26 | -------------------------------------------------------------------------------- /test/integration/migrations_check_roles_enabled/01_first.lua: -------------------------------------------------------------------------------- 1 | local utils = require('migrator.utils') 2 | 3 | return { 4 | up = function() 5 | if utils.check_roles_enabled({'vshard-router'}) then 6 | rawset(_G, 'vshard-router-set', true) 7 | end 8 | 9 | if 
utils.check_roles_enabled({'vshard-storage'}) then 10 | rawset(_G, 'vshard-storage-set', true) 11 | end 12 | return true 13 | end 14 | } 15 | -------------------------------------------------------------------------------- /test/integration/move_migrations_state_test.lua: -------------------------------------------------------------------------------- 1 | local t = require('luatest') 2 | local g = t.group('move_migrations_state') 3 | 4 | local fio = require('fio') 5 | 6 | local cartridge_helpers = require('cartridge.test-helpers') 7 | local shared = require('test.helper.integration').shared 8 | local utils = require("test.helper.utils") 9 | local datadir = fio.pathjoin(shared.datadir, 'move_migrations_state') 10 | 11 | g.before_all(function() 12 | g.cluster = cartridge_helpers.Cluster:new({ 13 | server_command = shared.server_command, 14 | datadir = datadir, 15 | use_vshard = true, 16 | base_advertise_port = 10600, 17 | replicasets = { 18 | { 19 | alias = 'router', 20 | uuid = cartridge_helpers.uuid('a'), 21 | roles = { 'vshard-router', 'migrator' }, 22 | servers = { { 23 | alias = 'router', 24 | instance_uuid = cartridge_helpers.uuid('a', 1) 25 | } }, 26 | }, 27 | { 28 | alias = 'storage-1', 29 | uuid = cartridge_helpers.uuid('b'), 30 | roles = { 'vshard-storage', 'migrator' }, 31 | servers = { 32 | { 33 | alias = 'storage-1-master', 34 | instance_uuid = cartridge_helpers.uuid('b', 1), 35 | env = {TARANTOOL_HTTP_ENABLED = 'false'}, 36 | }, 37 | { 38 | alias = 'storage-1-replica', 39 | instance_uuid = cartridge_helpers.uuid('b', 2), 40 | env = {TARANTOOL_HTTP_ENABLED = 'false'}, 41 | }, 42 | }, 43 | }, 44 | { 45 | alias = 'storage-2', 46 | uuid = cartridge_helpers.uuid('c'), 47 | roles = { 'vshard-storage', 'migrator' }, 48 | servers = { 49 | { 50 | alias = 'storage-2-master', 51 | instance_uuid = cartridge_helpers.uuid('c', 1), 52 | env = {TARANTOOL_HTTP_ENABLED = 'false'}, 53 | }, 54 | { 55 | alias = 'storage-2-replica', 56 | instance_uuid = cartridge_helpers.uuid('c', 2), 57 | env = {TARANTOOL_HTTP_ENABLED = 'false'}, 58 | }, 59 | }, 60 | }, 61 | }, 62 | }) 63 | 64 | g.cluster:start() 65 | end) 66 | 67 | g.after_all(function() 68 | g.cluster:stop() 69 | fio.rmtree(g.cluster.datadir) 70 | end) 71 | 72 | g.after_each(function() utils.cleanup(g) end) 73 | 74 | g.test_move_migrations_state = function(cg) 75 | local main = cg.cluster.main_server 76 | 77 | -- Pretend first two migrations are already applied on cluster by prev migrator version. 78 | main:eval([[ 79 | require('cartridge').config_patch_clusterwide({ 80 | migrations = { 81 | applied = { '01_first.lua', '02_second.lua' } 82 | } 83 | }) 84 | ]]) 85 | 86 | -- `up` call does not work due to non-empty cluster-wide migrations list. 87 | local status, resp = main:eval("return pcall(require('migrator').up)") 88 | t.assert_not(status) 89 | t.assert_str_contains(tostring(resp), 'Cannot perform an upgrade.') 90 | 91 | -- Move migrations. 92 | status, resp = main:eval("return pcall(require('migrator').move_migrations_state)") 93 | t.assert(status, tostring(resp)) 94 | t.assert_items_equals(resp, { 95 | ["router"] = {"01_first.lua", "02_second.lua"}, 96 | ["storage-1-master"] = {"01_first.lua", "02_second.lua"}, 97 | ["storage-2-master"] = {"01_first.lua", "02_second.lua"}, 98 | }) 99 | 100 | -- Check migrations are copied. 
101 | for _, server_alias in pairs({'router', 'storage-1-master', 'storage-2-master'}) do 102 | t.assert(cg.cluster:server(server_alias):eval([[ 103 | return box.space._migrations:get(1)['name'] == '01_first.lua' and 104 | box.space._migrations:get(2)['name'] == '02_second.lua' 105 | ]])) 106 | end 107 | t.assert(main:eval([[ 108 | return require('cartridge.confapplier').get_readonly('migrations').applied == nil 109 | ]])) 110 | 111 | -- `up` should perform the 03 migration only. 112 | status, resp = main:eval("return pcall(require('migrator').up)") 113 | t.assert(status, tostring(resp)) 114 | t.assert_equals(resp, { 115 | ['router'] = { '03_sharded.lua' }, 116 | ['storage-1-master'] = { '03_sharded.lua' }, 117 | ['storage-2-master'] = { '03_sharded.lua' }, 118 | }) 119 | end 120 | 121 | g.test_move_migrations_state_http = function(cg) 122 | local main = cg.cluster.main_server 123 | 124 | main:eval([[ 125 | require('cartridge').config_patch_clusterwide({ 126 | migrations = { 127 | applied = { '01_first.lua', '02_second.lua' } 128 | } 129 | }) 130 | ]]) 131 | 132 | -- Move migrations. 133 | local result = main:http_request('post', '/migrations/move_migrations_state', { json = {} }) 134 | local expected_moved = { 135 | ['router'] = { '01_first.lua', '02_second.lua' }, 136 | ['storage-1-master'] = { '01_first.lua', '02_second.lua' }, 137 | ['storage-2-master'] = { '01_first.lua', '02_second.lua' }, 138 | } 139 | t.assert_equals(result.json, { migrations_moved = expected_moved }) 140 | 141 | -- Check migrations are copied. 142 | for _, server_alias in pairs({'router', 'storage-1-master', 'storage-2-master'}) do 143 | t.assert(cg.cluster:server(server_alias):eval([[ 144 | return box.space._migrations:get(1)['name'] == '01_first.lua' and 145 | box.space._migrations:get(2)['name'] == '02_second.lua' 146 | ]])) 147 | end 148 | end 149 | 150 | g.test_move_migrations_call_on_replica = function(cg) 151 | local main = cg.cluster.main_server 152 | 153 | -- Pretend first two migrations are already applied on cluster by prev migrator version. 154 | main:eval([[ 155 | require('cartridge').config_patch_clusterwide({ 156 | migrations = { 157 | applied = { '01_first.lua', '02_second.lua' } 158 | } 159 | }) 160 | ]]) 161 | 162 | -- Move migrations. 163 | local status, resp = cg.cluster:server('storage-1-replica'):eval( 164 | "return pcall(require('migrator').move_migrations_state)") 165 | t.assert(status, tostring(resp)) 166 | t.assert_items_equals(resp, { 167 | ["router"] = {"01_first.lua", "02_second.lua"}, 168 | ["storage-1-master"] = {"01_first.lua", "02_second.lua"}, 169 | ["storage-2-master"] = {"01_first.lua", "02_second.lua"}, 170 | }) 171 | 172 | -- Check migrations are copied. 173 | for _, server_alias in pairs({'router', 'storage-1-master', 'storage-2-master'}) do 174 | t.assert(cg.cluster:server(server_alias):eval([[ 175 | return box.space._migrations:get(1)['name'] == '01_first.lua' and 176 | box.space._migrations:get(2)['name'] == '02_second.lua' 177 | ]])) 178 | end 179 | t.assert(main:eval([[ 180 | return require('cartridge.confapplier').get_readonly('migrations').applied == nil 181 | ]])) 182 | end 183 | 184 | g.test_move_empty_migrations_state = function(cg) 185 | local main = cg.cluster.main_server 186 | 187 | -- Pretend a previous migrator version left an empty list of applied migrations in cluster config. 188 | main:eval([[ 189 | require('cartridge').config_patch_clusterwide({ 190 | migrations = {applied = {}, 191 | options = {storage_timeout = 3.0}} 192 | }) 193 | ]]) 194 | 195 | -- Move migrations. No config - no errors.
196 | local status, resp = main:eval("return pcall(require('migrator').move_migrations_state)") 197 | t.assert(status, tostring(resp)) 198 | t.assert_items_equals(resp, {}) 199 | end 200 | 201 | g.test_move_migrations_consistency_check = function(cg) 202 | local main = cg.cluster.main_server 203 | 204 | local status, resp = main:eval([[ 205 | return pcall(require('cartridge').config_patch_clusterwide, 206 | {migrations = {applied = {}}}) 207 | ]]) 208 | t.assert(status, tostring(resp)) 209 | 210 | -- Apply all migrations. 211 | status, resp = main:eval("return pcall(require('migrator').up)") 212 | t.assert(status, tostring(resp)) 213 | t.assert_equals(resp, { 214 | ['router'] = { '01_first.lua', '02_second.lua', '03_sharded.lua' }, 215 | ['storage-1-master'] = { '01_first.lua', '02_second.lua', '03_sharded.lua' }, 216 | ['storage-2-master'] = { '01_first.lua', '02_second.lua', '03_sharded.lua' }, 217 | }) 218 | 219 | main:eval([[ 220 | require('cartridge').config_patch_clusterwide({ 221 | migrations = {applied = { '01_first.lua', '02_second.lua', '03_sharded.lua' }}, 222 | }) 223 | ]]) 224 | 225 | -- Migrations in config are consistent with local ones. No error. 226 | status, resp = main:eval("return pcall(require('migrator').move_migrations_state)") 227 | t.assert(status, tostring(resp)) 228 | t.assert_items_equals(resp, { 229 | ['router'] = {}, 230 | ['storage-1-master'] = {}, 231 | ['storage-2-master'] = {}, 232 | }) 233 | 234 | -- Make state inconsistent. 235 | main:eval([[ 236 | require('cartridge').config_patch_clusterwide({ 237 | migrations = {applied = { '01_first.lua', '03_sharded.lua' }} 238 | }) 239 | ]]) 240 | status, resp = main:eval("return pcall(require('migrator').move_migrations_state)") 241 | t.assert_not(status) 242 | t.assert_str_contains(tostring(resp), 'Inconsistency between cluster-wide and local applied migrations') 243 | 244 | -- Make sure cluster-wide migrations state is still there. 245 | t.assert(main:eval([[ 246 | return require('cartridge.confapplier').get_readonly('migrations').applied ~= nil 247 | ]])) 248 | end 249 | 250 | g.test_move_migrations_append_to_existing_local = function(cg) 251 | local main = cg.cluster.main_server 252 | 253 | for _, server in pairs(cg.cluster.servers) do 254 | server:eval([[ 255 | require('migrator').set_loader({ 256 | list = function() 257 | return { 258 | { 259 | name = '01.lua', 260 | up = function() return true end 261 | }, 262 | } 263 | end 264 | }) 265 | ]]) 266 | end 267 | 268 | local status, resp = main:eval("return pcall(require('migrator').up)") 269 | t.assert(status, tostring(resp)) 270 | t.assert_equals(resp, { 271 | ['router'] = { '01.lua' }, 272 | ['storage-1-master'] = { '01.lua' }, 273 | ['storage-2-master'] = { '01.lua' }, 274 | }) 275 | 276 | -- Append "applied" migrations to cluster config. 277 | main:eval([[ 278 | require('cartridge').config_patch_clusterwide({ 279 | migrations = {applied = { '01.lua', '02.lua' }}, 280 | }) 281 | ]]) 282 | 283 | -- Only the missing applied migrations are copied to local storage.
284 | status, resp = main:eval("return pcall(require('migrator').move_migrations_state)") 285 | t.assert(status, tostring(resp)) 286 | t.assert_items_equals(resp, { 287 | ['router'] = { '02.lua' }, 288 | ['storage-1-master'] = { '02.lua' }, 289 | ['storage-2-master'] = { '02.lua' }, 290 | }) 291 | 292 | t.assert(main:eval([[ 293 | return require('cartridge.confapplier').get_readonly('migrations').applied == nil 294 | ]])) 295 | end 296 | -------------------------------------------------------------------------------- /test/integration/new_replicaset_test.lua: -------------------------------------------------------------------------------- 1 | local t = require('luatest') 2 | local g = t.group('join_new_instance') 3 | local fiber = require('fiber') -- luacheck: ignore 4 | 5 | local fio = require('fio') 6 | 7 | local cartridge_helpers = require('cartridge.test-helpers') 8 | local shared = require('test.helper.integration').shared 9 | local datadir = fio.pathjoin(shared.datadir, 'join_new_server') 10 | local utils = require("test.helper.utils") 11 | 12 | 13 | 14 | g.before_all(function() 15 | g.cluster = cartridge_helpers.Cluster:new({ 16 | server_command = shared.server_command, 17 | datadir = datadir, 18 | use_vshard = true, 19 | base_advertise_port = 10700, 20 | replicasets = { 21 | { 22 | alias = 'router', 23 | uuid = cartridge_helpers.uuid('a'), 24 | roles = { 'vshard-router' }, 25 | servers = { { 26 | alias = 'router', 27 | instance_uuid = cartridge_helpers.uuid('a', 1) 28 | } }, 29 | }, 30 | { 31 | alias = 'storage-1', 32 | uuid = cartridge_helpers.uuid('b'), 33 | roles = { 'vshard-storage' }, 34 | servers = { 35 | { 36 | alias = 'storage-1-master', 37 | instance_uuid = cartridge_helpers.uuid('b', 1), 38 | env = {TARANTOOL_HTTP_ENABLED = 'false'}, 39 | }, 40 | { 41 | alias = 'storage-1-replica', 42 | instance_uuid = cartridge_helpers.uuid('b', 2), 43 | env = {TARANTOOL_HTTP_ENABLED = 'false'}, 44 | }, 45 | }, 46 | }, 47 | }, 48 | }) 49 | 50 | g.new_server = cartridge_helpers.Server:new({ 51 | alias = 'storage-2-master', 52 | command = g.cluster.server_command, 53 | replicaset_uuid = cartridge_helpers.uuid('c'), 54 | instance_uuid = cartridge_helpers.uuid('c', 1), 55 | cluster_cookie = g.cluster.cookie, 56 | workdir = datadir, 57 | advertise_port = 10204, 58 | http_port = 8084, 59 | }) 60 | 61 | g.cluster:start() 62 | g.new_server:start() 63 | end) 64 | 65 | g.after_all(function() 66 | g.cluster:stop() 67 | g.new_server:stop() 68 | fio.rmtree(g.cluster.datadir) 69 | fio.rmtree(g.new_server.workdir) 70 | end) 71 | 72 | g.after_each(function() utils.cleanup(g) end) 73 | 74 | g.test_gh_65_migrations_in_new_replicaset = function(cg) 75 | local main = cg.cluster.main_server 76 | 77 | main:eval([[ 78 | require('cartridge').config_patch_clusterwide({migrations = {options = {storage_timeout = 3.0}}}) 79 | ]]) 80 | 81 | local set_loader = [[ 82 | require('migrator').set_loader( 83 | require('migrator.directory-loader').new('test/integration/migrations-gh-65') 84 | ) 85 | ]] 86 | 87 | for _, server in pairs(cg.cluster.servers) do 88 | server.net_box:eval(set_loader) 89 | end 90 | 91 | local status, resp = main:eval("return pcall(require('migrator').up)") 92 | t.assert(status, tostring(resp)) 93 | t.assert_equals(resp, { 94 | ["router"] = {"001_create_func.lua"}, 95 | ["storage-1-master"] = {"001_create_func.lua"}, 96 | }) 97 | 98 | t.assert(cg.cluster:server('router'):eval([[ return box.func.sum ~= nil ]])) 99 | t.assert(cg.cluster:server('storage-1-master'):eval([[ return box.func.sum ~= nil 
]])) 100 | t.assert(cg.cluster:server('storage-1-replica'):eval([[ return box.func.sum ~= nil ]])) 101 | 102 | cg.new_server:eval(set_loader) 103 | cg.new_server:join_cluster(main) 104 | cg.cluster:wait_until_healthy() 105 | 106 | -- Wait until the new member really becomes healthy. 107 | cg.cluster:retrying({ timeout = 5 }, function() 108 | t.assert(main:eval([[ 109 | local member = require('membership').get_member('localhost:10204') 110 | return member and (member.payload.state_prev == 'ConfiguringRoles' or 111 | member.payload.state_prev == 'RolesConfigured') 112 | ]])) 113 | end) 114 | 115 | status, resp = main:eval("return pcall(require('migrator').up)") 116 | t.assert(status, tostring(resp)) 117 | t.assert_equals(resp, { 118 | ["storage-2-master"] = {"001_create_func.lua"}, 119 | }) 120 | t.assert(cg.new_server:eval([[ return box.func.sum ~= nil ]])) 121 | end 122 | -------------------------------------------------------------------------------- /test/integration/no_cartridge_ddl_test.lua: -------------------------------------------------------------------------------- 1 | local t = require('luatest') 2 | 3 | local fio = require('fio') 4 | 5 | local cartridge_helpers = require('cartridge.test-helpers') 6 | local shared = require('test.helper') 7 | local utils = require("test.helper.utils") 8 | 9 | local g = t.group('no_cartridge_ddl') 10 | 11 | local datadir = fio.pathjoin(shared.datadir, 'no_ddl') 12 | 13 | g.cluster = cartridge_helpers.Cluster:new({ 14 | server_command = shared.server_command, 15 | datadir = datadir, 16 | use_vshard = true, 17 | base_advertise_port = 13400, 18 | base_http_port = 8090, 19 | replicasets = { 20 | { 21 | alias = 'api', 22 | uuid = cartridge_helpers.uuid('a'), 23 | roles = { 'vshard-router' }, 24 | servers = { { instance_uuid = cartridge_helpers.uuid('a', 1) } }, 25 | }, 26 | { 27 | alias = 'storage-1', 28 | uuid = cartridge_helpers.uuid('b'), 29 | roles = { 'vshard-storage' }, 30 | servers = { 31 | { instance_uuid = cartridge_helpers.uuid('b', 1), }, 32 | }, 33 | }, 34 | }, 35 | }) 36 | 37 | g.before_all(function() 38 | g.cluster:start() 39 | for _, server in ipairs(g.cluster.servers) do 40 | server.net_box:eval("require('migrator').set_use_cartridge_ddl(false)") 41 | end 42 | end) 43 | g.after_all(function() g.cluster:stop() end) 44 | g.after_each(function() utils.cleanup(g) end) 45 | 46 | local cases = { 47 | with_config_loader = function() 48 | for _, server in pairs(g.cluster.servers) do 49 | server.net_box:eval([[ 50 | require('migrator').set_loader( 51 | require('migrator.config-loader').new() 52 | ) 53 | ]]) 54 | end 55 | 56 | local files = {"01_first.lua", "02_second.lua", "03_sharded.lua"} 57 | for _, v in ipairs(files) do 58 | local file = fio.open('test/integration/migrations/' .. v) 59 | local content = file:read() 60 | utils.set_sections(g, {{filename="migrations/source/"..v, content=content}}) 61 | file:close() 62 | end 63 | end, 64 | with_directory_loader = function() 65 | for _, server in pairs(g.cluster.servers) do 66 | server.net_box:eval([[ 67 | require('migrator').set_loader( 68 | require('migrator.directory-loader').new('test/integration/migrations') 69 | ) 70 | ]]) 71 | end 72 | end 73 | } 74 | 75 | for k, configure_func in pairs(cases) do 76 | g['test_no_cartridge_ddl_' ..
k] = function() 77 | configure_func() 78 | 79 | local main = g.cluster.main_server 80 | 81 | for _, server in pairs(g.cluster.servers) do 82 | t.assert(server.net_box:eval('return box.space.first == nil')) 83 | end 84 | main:http_request('post', '/migrations/up', { json = {} }) 85 | for _, server in pairs(g.cluster.servers) do 86 | t.assert_not(server.net_box:eval('return box.space.first == nil')) 87 | end 88 | local config = main:download_config() 89 | t.assert_not(config.schema) 90 | end 91 | end 92 | -------------------------------------------------------------------------------- /test/integration/upgrade_test.lua: -------------------------------------------------------------------------------- 1 | local t = require('luatest') 2 | 3 | local fio = require('fio') 4 | 5 | local cartridge_helpers = require('cartridge.test-helpers') 6 | local shared = require('test.helper.integration').shared 7 | local utils = require("test.helper.utils") 8 | 9 | local g = t.group('upgrade') 10 | 11 | local datadir = fio.pathjoin(shared.datadir, 'upgrade') 12 | 13 | g.before_all(function() 14 | g.cluster = cartridge_helpers.Cluster:new({ 15 | server_command = shared.server_command, 16 | datadir = datadir, 17 | use_vshard = false, 18 | base_advertise_port = 13400, 19 | base_http_port = 8090, 20 | replicasets = { 21 | { 22 | alias = 'storage-1', 23 | uuid = cartridge_helpers.uuid('a'), 24 | roles = { 'migrator' }, 25 | servers = { { instance_uuid = cartridge_helpers.uuid('a', 1) } }, 26 | }, 27 | }, 28 | }) 29 | 30 | g.cluster:start() 31 | end) 32 | 33 | g.after_all(function() 34 | g.cluster:stop() 35 | fio.rmtree(g.cluster.datadir) 36 | end) 37 | g.after_each(function() utils.cleanup(g) end) 38 | 39 | g.test_upgrade_basic = function(cg) 40 | local main = cg.cluster.main_server 41 | main:eval([[ 42 | require('migrator').set_loader( 43 | require('migrator.config-loader').new()) 44 | ]]) 45 | utils.set_sections(g, { 46 | { 47 | filename = "migrations/source/01_script.lua", 48 | content = [[ 49 | return { 50 | up = function() 51 | box.schema.create_space('test', { 52 | format = {{'id', type='unsigned'}}, 53 | }) 54 | box.space.test:create_index('p') 55 | end 56 | } 57 | ]] 58 | }, 59 | { 60 | filename = "migrations/source/02_script.lua", 61 | content = [[ 62 | return { 63 | up = function() 64 | box.space.test:insert{1} 65 | end 66 | } 67 | ]] 68 | }, 69 | }) 70 | 71 | 72 | local result = main:eval([[ return require('migrator').upgrade() ]]) 73 | t.assert_equals(result.applied_now, {'01_script.lua', '02_script.lua'}) 74 | t.assert_equals(result.applied, {'01_script.lua', '02_script.lua'}) 75 | 76 | t.assert(main:eval([[ return box.space.test:get(1) ]])) 77 | t.assert_not(main:eval([[ return box.space.test:get(2) ]])) 78 | 79 | -- Append migration script. 80 | utils.set_sections(g, { 81 | { 82 | filename = "migrations/source/03_script.lua", 83 | content = [[ return { up = function() box.space.test:insert{2} end } ]] 84 | }, 85 | }) 86 | result = main:eval([[ return require('migrator').upgrade() ]]) 87 | t.assert_equals(result.applied_now, { '03_script.lua' }) 88 | t.assert_equals(result.applied, { '01_script.lua', '02_script.lua', '03_script.lua' }) 89 | t.assert(main:eval([[ return box.space.test:get(1) ]])) 90 | t.assert(main:eval([[ return box.space.test:get(2) ]])) 91 | end 92 | 93 | g.test_upgrade_clusterwide_applied_migrations_exist = function(cg) 94 | local main = cg.cluster.main_server 95 | -- Simulate previous version configuration. 
96 | local _, err = main:eval([[ 97 | require('cartridge').config_patch_clusterwide({ 98 | migrations = { 99 | applied = { '001.lua', '002.lua' } 100 | } 101 | }) 102 | ]]) 103 | t.assert_not(err) 104 | 105 | local status, resp = main:eval([[ return pcall(require('migrator').upgrade) ]]) 106 | t.assert_not(status) 107 | t.assert_str_contains(tostring(resp), 'A list of applied migrations is found in cluster config') 108 | end 109 | 110 | -------------------------------------------------------------------------------- /test/unit/check_roles_enabled_test.lua: -------------------------------------------------------------------------------- 1 | local t = require("luatest") 2 | local g = t.group("check_roles_enabled_unit") 3 | 4 | g.before_all(function(cg) 5 | cg._orig_confapplier = package.loaded["cartridge.confapplier"] 6 | end) 7 | 8 | g.after_all(function(cg) 9 | package.loaded["cartridge.confapplier"] = cg._orig_confapplier 10 | end) 11 | 12 | g.test_check = function() 13 | package.loaded["cartridge.confapplier"] = { 14 | get_readonly = function() 15 | return { 16 | replicasets = { 17 | [box.info.cluster.uuid] = { 18 | roles = { 19 | ['space-explorer'] = true, 20 | ['vshard-router'] = true, 21 | ['crud-router'] = true, 22 | ['my_super_role'] = true, 23 | } 24 | } 25 | } 26 | } 27 | end 28 | } 29 | 30 | local utils = require('migrator.utils') 31 | 32 | t.assert(utils.check_roles_enabled({'crud-router', 'my_super_role'})) 33 | t.assert(utils.check_roles_enabled({'space-explorer', 'vshard-router', 'crud-router', 'my_super_role'})) 34 | t.assert(utils.check_roles_enabled({})) 35 | t.assert(utils.check_roles_enabled({'my_super_role'})) 36 | t.assert_not(utils.check_roles_enabled({'crud-storage', 'my_super_role'})) 37 | t.assert_not(utils.check_roles_enabled({'crud-storage'})) 38 | t.assert_not(utils.check_roles_enabled({'crud-storage', 'expirationd'})) 39 | end 40 | -------------------------------------------------------------------------------- /test/unit/config_loader_test.lua: -------------------------------------------------------------------------------- 1 | local t = require("luatest") 2 | local g = t.group("config-loader") 3 | local fun = require("fun") 4 | 5 | local loader = require("migrator.config-loader") 6 | 7 | g.test___must_sort_sorts = function() 8 | local a = {{name = '002_second.lua'}, {name = '003_third.lua'}, {name = '001_first.lua'}} 9 | loader.__must_sort(a) 10 | t.assert_equals(a, {{name = '001_first.lua'}, {name = '002_second.lua'}, {name = '003_third.lua'}}) 11 | end 12 | 13 | local function mock_clusterwide_config(cfg) 14 | package.loaded["cartridge.confapplier"] = { 15 | get_active_config = function() 16 | return { 17 | get_plaintext = function() 18 | return cfg 19 | end 20 | } 21 | end 22 | } 23 | end 24 | 25 | for name, case in pairs({ 26 | no_migrations = {cfg = {}, expected = {}}, 27 | single_migration = { 28 | cfg = { 29 | ['migrations/source/001_first.lua'] = 'return {up = function() return "first" end}', 30 | }, 31 | expected = {"001_first.lua"}, 32 | }, 33 | multiple_migrations = { 34 | cfg = { 35 | ['migrations/source/001_first.lua'] = 'return {up = function() return "first" end}', 36 | ['migrations/source/002_second.lua'] = 'return {up = function() return "second" end}', 37 | }, 38 | expected = {"001_first.lua", "002_second.lua"}, 39 | }, 40 | ignores_other_config_sections = { 41 | cfg = { 42 | ['migrations/source/001_first.lua'] = 'return {up = function() return "first" end}', 43 | ['migrations/MY_MIGRATIONS/002_second.lua'] = 'return {up = function() return 
"second" end}', 44 | }, 45 | expected = {"001_first.lua"}, 46 | }, 47 | ignores_lua_errors = { 48 | cfg = { 49 | ['migrations/source/001_first.lua'] = 'return {up = function() return "first" end}', 50 | ['migrations/source/002_with_lua_err.lua'] = 'return up = function() return "second" end}', 51 | }, 52 | expected = {"001_first.lua"}, 53 | }, 54 | }) do 55 | g['test_' .. name] = function() 56 | local l = loader.new() 57 | mock_clusterwide_config(case.cfg) 58 | 59 | local names = fun.iter(l:list()):map(function(x) return x.name end):totable() 60 | t.assert_equals(names, case.expected) 61 | end 62 | end 63 | -------------------------------------------------------------------------------- /test/unit/directory_loader_test.lua: -------------------------------------------------------------------------------- 1 | local t = require('luatest') 2 | local g = t.group('directory-loader') 3 | 4 | local fun = require('fun') 5 | local fio = require('fio') 6 | 7 | local loader = require('migrator.directory-loader') 8 | 9 | g.test_positive = function() 10 | local list = loader.new('test/unit/migrations/positive'):list() 11 | local names = fun.iter(list):map(function(x) return x.name end):totable() 12 | t.assert_equals(names, { '01_first.lua', '02_second.lua' }) 13 | end 14 | 15 | g.test_missing_folder = function() 16 | local loader = loader.new('test/unit/mmmmirgations') -- luacheck: ignore 17 | t.assert_error_msg_contains('is not valid', loader.list, loader) 18 | end 19 | 20 | g.test_dynamic_folder = function() 21 | fio.rmtree('test/unit/migrations/empty') 22 | fio.mkdir('test/unit/migrations/empty') 23 | local ldr = loader.new('test/unit/migrations/empty') 24 | t.assert_equals(ldr:list(), { }) 25 | 26 | fio.copyfile('test/unit/migrations/positive/01_first.lua', './test/unit/migrations/empty/test.lua') 27 | t.assert_equals(ldr:list()[1].name, 'test.lua') 28 | fio.rmtree('test/unit/migrations/empty') 29 | end 30 | 31 | 32 | -------------------------------------------------------------------------------- /test/unit/migrations/positive/01_first.lua: -------------------------------------------------------------------------------- 1 | return { 2 | up = function() 3 | local f = box.schema.create_space('first', { 4 | format = { 5 | { name = 'key', type = 'string' }, 6 | { name = 'value', type = 'string', is_nullable = true } 7 | }, 8 | if_not_exists = true, 9 | }) 10 | f:create_index('primary', { 11 | parts = { 'key' }, 12 | if_not_exists = true, 13 | }) 14 | return true 15 | end 16 | } 17 | -------------------------------------------------------------------------------- /test/unit/migrations/positive/02_second.lua: -------------------------------------------------------------------------------- 1 | return { 2 | up = function() 3 | box.space.first:create_index('value', { 4 | parts = { 'value' }, 5 | unique = false, 6 | if_not_exists = true, 7 | }) 8 | return true 9 | end 10 | } 11 | -------------------------------------------------------------------------------- /tmp/.keep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tarantool/migrations/ae4d8745bb7b07234ee7fd678030ef44f1db3543/tmp/.keep --------------------------------------------------------------------------------