├── .github └── workflows │ ├── fast_testing.yml │ ├── lint.yml │ ├── publishing.yml │ └── reusable_testing.yml ├── .gitignore ├── .gitmodules ├── .luacheckrc ├── CMakeLists.txt ├── LICENSE ├── README.md ├── changelogs ├── 0.1.20.md ├── 0.1.21.md ├── 0.1.22.md ├── 0.1.23.md └── 0.1.24.md ├── cmake ├── FindMsgPuck.cmake └── FindTarantool.cmake ├── debian ├── .gitignore ├── changelog ├── compat ├── control ├── copyright ├── docs ├── prebuild.sh ├── rules └── source │ └── format ├── docs └── RFC │ └── replicaset_lock_and_bucket.md ├── example ├── .tarantoolctl ├── Makefile ├── generate_load.lua ├── localcfg.lua ├── router.lua ├── router_1.lua ├── storage.lua ├── storage_1_a.lua ├── storage_1_b.lua ├── storage_2_a.lua ├── storage_2_b.lua └── vshard ├── rpm ├── prebuild.sh └── tarantool-vshard.spec ├── sharding_arch.png ├── test ├── .tarantoolctl ├── CMakeLists.txt ├── certs │ ├── ca.crt │ ├── server.crt │ └── server.key ├── failover │ ├── box_1_a.lua │ ├── box_1_b.lua │ ├── box_1_c.lua │ ├── box_1_d.lua │ ├── box_2_a.lua │ ├── box_2_b.lua │ ├── box_2_c.lua │ ├── box_3_a.lua │ ├── box_3_b.lua │ ├── cluster_changes.result │ ├── cluster_changes.test.lua │ ├── config.lua │ ├── failover.result │ ├── failover.test.lua │ ├── failover_errinj.result │ ├── failover_errinj.test.lua │ ├── names.lua │ ├── router_1.lua │ ├── router_2.lua │ ├── router_3.lua │ ├── router_4.lua │ ├── suite.ini │ └── test.lua ├── instances │ ├── default.lua │ ├── router.lua │ └── storage.lua ├── lua_libs │ ├── git_util.lua │ ├── localcfg.lua │ ├── storage_template.lua │ └── util.lua ├── luatest_helpers.lua ├── luatest_helpers │ ├── cluster.lua │ ├── server.lua │ └── vtest.lua ├── misc │ ├── bad_uuid_1_a.lua │ ├── bad_uuid_1_b.lua │ ├── bad_uuid_2_a.lua │ ├── bad_uuid_2_a_repaired.lua │ ├── bad_uuid_2_b.lua │ ├── bad_uuid_config.lua │ ├── bad_uuid_router.lua │ ├── check_uuid_on_connect.result │ ├── check_uuid_on_connect.test.lua │ ├── fullmesh.result │ ├── fullmesh.test.lua │ ├── master_switch.result │ ├── master_switch.test.lua │ ├── reconfigure.result │ ├── reconfigure.test.lua │ ├── router_1.lua │ ├── storage_1_a.lua │ ├── storage_1_b.lua │ ├── storage_2_a.lua │ ├── storage_2_b.lua │ ├── storage_3_a.lua │ ├── suite.ini │ └── test.lua ├── multiple_routers │ ├── configs.lua │ ├── multiple_routers.result │ ├── multiple_routers.test.lua │ ├── router_1.lua │ ├── storage_1_1_a.lua │ ├── storage_1_1_b.lua │ ├── storage_1_2_a.lua │ ├── storage_1_2_b.lua │ ├── storage_2_1_a.lua │ ├── storage_2_1_b.lua │ ├── storage_2_2_a.lua │ ├── storage_2_2_b.lua │ ├── suite.ini │ └── test.lua ├── rebalancer │ ├── box_1_a.lua │ ├── box_1_b.lua │ ├── box_2_a.lua │ ├── box_2_b.lua │ ├── box_3_a.lua │ ├── box_3_b.lua │ ├── box_4_a.lua │ ├── box_4_b.lua │ ├── bucket_ref.result │ ├── bucket_ref.test.lua │ ├── config.lua │ ├── engine.cfg │ ├── errinj.result │ ├── errinj.test.lua │ ├── fullbox_1_a.lua │ ├── fullbox_1_b.lua │ ├── fullbox_2_a.lua │ ├── fullbox_2_b.lua │ ├── fullbox_3_a.lua │ ├── fullbox_3_b.lua │ ├── fullbox_4_a.lua │ ├── fullbox_4_b.lua │ ├── parallel.result │ ├── parallel.test.lua │ ├── rebalancer.result │ ├── rebalancer.test.lua │ ├── rebalancer2.result │ ├── rebalancer2.test.lua │ ├── rebalancer_lock_and_pin.result │ ├── rebalancer_lock_and_pin.test.lua │ ├── rebalancer_utils.lua │ ├── receiving_bucket.result │ ├── receiving_bucket.test.lua │ ├── restart_during_rebalancing.result │ ├── restart_during_rebalancing.test.lua │ ├── router_1.lua │ ├── stress_add_remove_rs.result │ ├── stress_add_remove_rs.test.lua │ ├── 
stress_add_remove_several_rs.result │ ├── stress_add_remove_several_rs.test.lua │ ├── suite.ini │ └── test.lua ├── reload_evolution │ ├── storage.result │ ├── storage.test.lua │ ├── storage_1_a.lua │ ├── storage_1_b.lua │ ├── storage_2_a.lua │ ├── storage_2_b.lua │ ├── suite.ini │ └── test.lua ├── replicaset-luatest │ ├── replicaset_3_test.lua │ ├── suite.ini │ └── vconnect_test.lua ├── router-luatest │ ├── map_callrw_test.lua │ ├── reload_test.lua │ ├── router_2_2_test.lua │ ├── router_3_3_test.lua │ ├── router_election_auto_master_test.lua │ └── suite.ini ├── router │ ├── boot_replica_first.result │ ├── boot_replica_first.test.lua │ ├── box_1_a.lua │ ├── box_1_b.lua │ ├── box_1_c.lua │ ├── complex_call.result │ ├── complex_call.test.lua │ ├── config.lua │ ├── empty_cluster.lua │ ├── empty_cluster.result │ ├── empty_cluster.test.lua │ ├── exponential_timeout.result │ ├── exponential_timeout.test.lua │ ├── map-reduce.result │ ├── map-reduce.test.lua │ ├── master_discovery.result │ ├── master_discovery.test.lua │ ├── reconnect_to_master.result │ ├── reconnect_to_master.test.lua │ ├── reload.result │ ├── reload.test.lua │ ├── reroute_wrong_bucket.result │ ├── reroute_wrong_bucket.test.lua │ ├── retry_reads.result │ ├── retry_reads.test.lua │ ├── router.result │ ├── router.test.lua │ ├── router2.result │ ├── router2.test.lua │ ├── router_1.lua │ ├── router_2.lua │ ├── router_3.lua │ ├── router_and_rebalancing.result │ ├── router_and_rebalancing.test.lua │ ├── storage_1_a.lua │ ├── storage_1_b.lua │ ├── storage_2_a.lua │ ├── storage_2_b.lua │ ├── suite.ini │ ├── sync.result │ ├── sync.test.lua │ ├── test.lua │ ├── wrong_config.result │ └── wrong_config.test.lua ├── storage-luatest │ ├── auto_master_2_2_2_test.lua │ ├── auto_master_2_test.lua │ ├── box_cfg_mode_test.lua │ ├── bucket_triggers_test.lua │ ├── election_auto_master_3_3_test.lua │ ├── garbage_collector_test.lua │ ├── persistent_names_1_1_test.lua │ ├── persistent_names_2_test.lua │ ├── rebalancer_test.lua │ ├── schema_management_mode_test.lua │ ├── service_info_test.lua │ ├── set_persistent_names_test.lua │ ├── storage_1_1_1_test.lua │ ├── storage_1_1_test.lua │ ├── storage_1_test.lua │ ├── storage_call_test.lua │ └── suite.ini ├── storage │ ├── cfg_after_box.result │ ├── cfg_after_box.test.lua │ ├── demote_sync_errinj.result │ ├── demote_sync_errinj.test.lua │ ├── engine.cfg │ ├── read_only_slave.result │ ├── read_only_slave.test.lua │ ├── recovery.result │ ├── recovery.test.lua │ ├── recovery_errinj.result │ ├── recovery_errinj.test.lua │ ├── ref.result │ ├── ref.test.lua │ ├── reload.result │ ├── reload.test.lua │ ├── scheduler.result │ ├── scheduler.test.lua │ ├── storage.result │ ├── storage.test.lua │ ├── storage_1_1.lua │ ├── storage_1_2.lua │ ├── storage_1_3.lua │ ├── storage_1_a.lua │ ├── storage_1_b.lua │ ├── storage_2_a.lua │ ├── storage_2_b.lua │ ├── suite.ini │ ├── sync.result │ ├── sync.test.lua │ └── test.lua ├── test-run.py ├── unit-luatest │ ├── config_test.lua │ ├── error_test.lua │ ├── service_info_test.lua │ ├── storage_exports_test.lua │ ├── suite.ini │ ├── util_test.lua │ └── version_test.lua ├── unit-tap │ ├── heap.test.lua │ ├── ref.test.lua │ ├── scheduler.test.lua │ └── suite.ini ├── unit │ ├── box.lua │ ├── box2.lua │ ├── config.result │ ├── config.test.lua │ ├── engine.cfg │ ├── error.result │ ├── error.test.lua │ ├── rebalancer.result │ ├── rebalancer.test.lua │ ├── reload_evolution.result │ ├── reload_evolution.test.lua │ ├── rlist.result │ ├── rlist.test.lua │ ├── router.result │ ├── router.test.lua │ 
├── suite.ini │ ├── upgrade.result │ ├── upgrade.test.lua │ ├── util.result │ └── util.test.lua └── upgrade │ ├── box.lua │ ├── storage_1_a.lua │ ├── storage_1_b.lua │ ├── storage_2_a.lua │ ├── storage_2_b.lua │ ├── suite.ini │ ├── upgrade.result │ └── upgrade.test.lua ├── vshard-scm-1.rockspec └── vshard ├── CMakeLists.txt ├── cfg.lua ├── consts.lua ├── error.lua ├── hash.lua ├── heap.lua ├── init.lua ├── registry.lua ├── replicaset.lua ├── rlist.lua ├── router ├── CMakeLists.txt └── init.lua ├── service_info.lua ├── storage ├── CMakeLists.txt ├── export_log.lua ├── exports.lua ├── init.lua ├── ref.lua ├── reload_evolution.lua ├── sched.lua └── schema.lua ├── util.lua └── version.lua /.github/workflows/lint.yml: -------------------------------------------------------------------------------- 1 | name: lint 2 | 3 | on: 4 | push: 5 | pull_request: 6 | workflow_dispatch: 7 | 8 | concurrency: 9 | group: ${{ github.ref }} 10 | cancel-in-progress: true 11 | 12 | jobs: 13 | luacheck: 14 | # We want to run on external PRs, but not on our own internal 15 | # PRs as they'll be run by the push to the branch. 16 | # 17 | # The main trick is described here: 18 | # https://github.com/Dart-Code/Dart-Code/pull/2375 19 | if: github.event_name == 'push' || github.event.pull_request.head.repo.full_name != github.repository 20 | 21 | runs-on: ubuntu-22.04 22 | 23 | steps: 24 | - uses: actions/checkout@v3 25 | with: 26 | fetch-depth: 0 27 | submodules: recursive 28 | 29 | - name: Install deps 30 | run: | 31 | sudo apt-get -y -f install lua5.1 luarocks 32 | sudo luarocks install luacheck 0.26.1 33 | - name: Luacheck 34 | run: luacheck --codes --config .luacheckrc . 35 | 36 | checkpatch: 37 | if: github.event_name == 'pull_request' 38 | 39 | runs-on: ubuntu-22.04 40 | 41 | steps: 42 | - uses: actions/checkout@v3 43 | with: 44 | fetch-depth: 0 45 | ref: ${{ github.event.pull_request.head.sha }} 46 | 47 | - name: Install deps 48 | uses: actions/checkout@v2 49 | with: 50 | repository: tarantool/checkpatch 51 | path: 'checkpatch' 52 | 53 | - name: Check patch 54 | run: | 55 | checkpatch/checkpatch.pl --color=always --show-types \ 56 | --ignore NO_CHANGELOG \ 57 | --git HEAD~${{ github.event.pull_request.commits }}..HEAD 58 | -------------------------------------------------------------------------------- /.github/workflows/publishing.yml: -------------------------------------------------------------------------------- 1 | name: publishing 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | tags: 8 | - '*' 9 | workflow_dispatch: 10 | 11 | env: 12 | ROCK_NAME: vshard 13 | 14 | jobs: 15 | scm: 16 | if: github.ref == 'refs/heads/master' 17 | 18 | runs-on: ubuntu-22.04 19 | 20 | steps: 21 | - uses: actions/checkout@v3 22 | 23 | - name: Push scm rockspec 24 | uses: tarantool/rocks.tarantool.org/github-action@master 25 | with: 26 | auth: ${{ secrets.ROCKS_AUTH }} 27 | files: ${{ env.ROCK_NAME }}-scm-1.rockspec 28 | 29 | version-check: 30 | # We need to run this job only on tag push. 
31 | if: startsWith(github.ref, 'refs/tags/') 32 | 33 | runs-on: ubuntu-22.04 34 | 35 | steps: 36 | - name: Check module version 37 | uses: tarantool/actions/check-module-version@master 38 | with: 39 | module-name: vshard 40 | 41 | release: 42 | if: startsWith(github.ref, 'refs/tags') 43 | 44 | runs-on: ubuntu-22.04 45 | 46 | needs: version-check 47 | 48 | steps: 49 | - uses: actions/checkout@v3 50 | 51 | - uses: tarantool/setup-tarantool@v3 52 | with: 53 | tarantool-version: '2.10' 54 | 55 | - name: Create release rockspec 56 | run: > 57 | sed -e "s/version = '.\+'/version = '${{ github.ref_name }}-1'/g" 58 | -e "s/branch = '.\+'/tag = '${{ github.ref_name }}'/g" 59 | ${{ env.ROCK_NAME }}-scm-1.rockspec > 60 | ${{ env.ROCK_NAME }}-${{ github.ref_name }}-1.rockspec 61 | 62 | - name: Create release rock 63 | run: | 64 | tarantoolctl rocks install ${{ env.ROCK_NAME }}-${{ github.ref_name }}-1.rockspec 65 | tarantoolctl rocks pack ${{ env.ROCK_NAME }} ${{ github.ref_name }} 66 | 67 | - name: Push release rockspec and rock 68 | uses: tarantool/rocks.tarantool.org/github-action@master 69 | with: 70 | auth: ${{ secrets.ROCKS_AUTH }} 71 | files: | 72 | ${{ env.ROCK_NAME }}-${{ github.ref_name }}-1.rockspec 73 | ${{ env.ROCK_NAME }}-${{ github.ref_name }}-1.all.rock 74 | -------------------------------------------------------------------------------- /.github/workflows/reusable_testing.yml: -------------------------------------------------------------------------------- 1 | name: reusable_testing 2 | 3 | on: 4 | workflow_call: 5 | inputs: 6 | artifact_name: 7 | description: 'The name of the tarantool build artifact' 8 | default: ubuntu-jammy 9 | required: false 10 | type: string 11 | 12 | jobs: 13 | run_tests: 14 | runs-on: ubuntu-22.04 15 | steps: 16 | - name: 'Clone the vshard module' 17 | uses: actions/checkout@v4 18 | with: 19 | repository: ${{ github.repository_owner }}/vshard 20 | # Fetch the entire history for all branches and tags. It is needed for 21 | # upgrade testing. 22 | fetch-depth: 0 23 | # Enable recursive submodules checkout as test-run git module is used 24 | # for running tests. 25 | submodules: recursive 26 | 27 | - name: 'Download the tarantool build artifact' 28 | uses: actions/download-artifact@v4 29 | with: 30 | name: ${{ inputs.artifact_name }} 31 | 32 | - name: 'Install tarantool' 33 | # TODO(ylobankov): Install package dependencies. Now we're lucky: all 34 | # dependencies are already there. 35 | run: sudo dpkg -i tarantool*.deb 36 | 37 | - name: 'Install test requirements' 38 | run: pip3 install --user -r test-run/requirements.txt 39 | 40 | - run: cmake . 
41 | - run: make test-force 42 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | .*.swp 3 | *.snap 4 | *.xlog 5 | *.log 6 | *.vylog 7 | test/var/ 8 | VERSION 9 | 10 | CMakeCache.txt 11 | CMakeFiles 12 | Makefile 13 | cmake_install.cmake 14 | 15 | *.so 16 | *.dylib 17 | 18 | packpack 19 | build 20 | *.reject 21 | .rocks 22 | build.luarocks 23 | .DS_Store 24 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "test/test-run"] 2 | path = test-run 3 | url = https://github.com/tarantool/test-run.git 4 | -------------------------------------------------------------------------------- /.luacheckrc: -------------------------------------------------------------------------------- 1 | std = 'luajit' 2 | globals = {'box', '_TARANTOOL', 'tonumber64', 'utf8', 'table'} 3 | ignore = { 4 | -- Unused argument <self>. 5 | '212/self', 6 | -- Shadowing a local variable. 7 | '421', 8 | -- Shadowing an upvalue. 9 | '431', 10 | -- Shadowing an upvalue argument. 11 | '432', 12 | } 13 | 14 | include_files = { 15 | 'vshard/**/*.lua', 16 | 'test/**/*_test.lua', 17 | 'test/luatest_helpers/vtest.lua', 18 | 'test/instances/*.lua', 19 | } 20 | 21 | exclude_files = { 22 | 'test/var/*', 23 | } 24 | 25 | local test_rules = { 26 | ignore = { 27 | -- Accessing an undefined variable. 28 | '113/ifiber', 29 | '113/ilt', 30 | '113/imsgpack', 31 | '112/ivconst', 32 | '113/ivconst', 33 | '113/iverror', 34 | '112/ivshard', 35 | '113/ivshard', 36 | '113/ivtest', 37 | '113/ivutil', 38 | '113/iwait_timeout', 39 | '113/iyaml', 40 | } 41 | } 42 | 43 | files['test/**/*_test.lua'] = test_rules 44 | files['test/luatest_helpers/vtest.lua'] = test_rules 45 | files['test/unit-luatest/version_test.lua'] = { 46 | ignore = { 47 | -- Replace comparison sign. 48 | '581' 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.5 FATAL_ERROR) 2 | 3 | set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake" ${CMAKE_MODULE_PATH}) 4 | include(ExternalProject) 5 | 6 | project(vshard C) 7 | if(NOT CMAKE_BUILD_TYPE) 8 | set(CMAKE_BUILD_TYPE Debug) 9 | endif() 10 | 11 | # Find Tarantool 12 | set(TARANTOOL_FIND_REQUIRED ON) 13 | set(CMAKE_INSTALL_DATADIR "") 14 | find_package(Tarantool) 15 | include_directories(${TARANTOOL_INCLUDE_DIRS}) 16 | 17 | add_definitions("-D_GNU_SOURCE") 18 | 19 | # Set CFLAGS 20 | set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=gnu99") 21 | set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -Wall -Wextra") 22 | 23 | # Build module 24 | add_subdirectory(vshard) 25 | # Enable tests 26 | add_subdirectory(test) 27 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2017-2018 Tarantool AUTHORS: please see AUTHORS file. 2 | 3 | Redistribution and use in source and binary forms, with or 4 | without modification, are permitted provided that the following 5 | conditions are met: 6 | 7 | 1. Redistributions of source code must retain the above 8 | copyright notice, this list of conditions and the 9 | following disclaimer. 10 | 11 | 2.
Redistributions in binary form must reproduce the above 12 | copyright notice, this list of conditions and the following 13 | disclaimer in the documentation and/or other materials 14 | provided with the distribution. 15 | 16 | THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND 17 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 18 | TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL 20 | AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, 21 | INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 23 | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 24 | BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 25 | LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF 27 | THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 | SUCH DAMAGE. 29 | -------------------------------------------------------------------------------- /changelogs/0.1.20.md: -------------------------------------------------------------------------------- 1 | Date: 23-05-2022 2 | 3 | Tag: 0.1.20 4 | 5 | ## Compatibility 6 | 7 | VShard 0.1.20 is fully compatible with the previous VShard versions. But since 8 | this release VShard supports only Tarantool >= 1.10.1. 9 | 10 | ## Functionality added or changed 11 | 12 | * The configuration option `collect_lua_garbage` is deprecated. Now it doesn't 13 | do anything except printing a warning in the logs that it should not be used. 14 | 15 | * The router now supports the `msgpack.object` feature. It can be passed to all 16 | `vshard.router.call...()` and `.map_call...()` functions. They can also accept 17 | the netbox option `return_raw` to get an msgpack object in return (gh-312). 18 | 19 | * The URIs in the config now support everything that can be passed to 20 | `box.cfg.listen` and `netbox.connect()`: number, string, table. There is also 21 | a new config option - `.listen`. It can be used to set up 22 | listening on multiple ports on storages and for SSL in Tarantool Enterprise 23 | (gh-325). 24 | 25 | ## Bugs fixed 26 | 27 | * Fixed a bug where the router could raise a Lua exception about `vshard.error` 28 | not being defined if a storage was disabled (implicitly or via 29 | `vshard.storage.disable()`). 30 | 31 | * Fixed a bug where the router could raise a Lua exception about the `master_uuid` 32 | variable not being defined when the `master='auto'` feature was used. 33 | 34 | * Fixed a bug where usage of the `memtx_use_mvcc_engine` option in `box.cfg` could 35 | stop the bucket garbage collector from working. As a result, rebalancing could also 36 | stop working, and `'garbage'` and `'sent'` buckets could stay visible in 37 | the monitoring (gh-314). 38 | -------------------------------------------------------------------------------- /changelogs/0.1.21.md: -------------------------------------------------------------------------------- 1 | Date: 20-08-2022 2 | 3 | Tag: 0.1.21 4 | 5 | ## Compatibility 6 | 7 | VShard 0.1.21 is fully compatible with the previous VShard versions. 8 | 9 | ## Bugs fixed 10 | 11 | * Fixed a bug where rebalancing didn't work if the sharded spaces had 12 | unusual types like 'varbinary' (gh-327, gh-328). 13 | 14 | * A pack of fixes for problems with accessing buckets on replicas. They were not 15 | properly protected against the rebalancer, so users could read outdated 16 | or even inconsistent data (gh-173).
17 | 18 | * Fixed a bug where, if vshard's netbox connection fibers were killed on the router 19 | or storage, the connection might not be re-established on some Tarantool versions, 20 | making the peer completely inaccessible from the given node (gh-341). 21 | -------------------------------------------------------------------------------- /changelogs/0.1.22.md: -------------------------------------------------------------------------------- 1 | Date: 20-02-2023 2 | 3 | Tag: 0.1.22 4 | 5 | ## Compatibility 6 | 7 | VShard 0.1.22 is fully compatible with the previous VShard versions. 8 | 9 | ## Functionality added or changed 10 | 11 | * A new trigger `vshard.storage.on_bucket_event()` to react to bucket events, 12 | such as move or GC (gh-372). 13 | 14 | * `vshard.router/storage.info()` now accept an option `{with_services = true}` 15 | to get additional info about background services (such as discovery or the 16 | rebalancer) that are working on the current instance (gh-107). 17 | 18 | ## Bugs fixed 19 | 20 | * Fixed a bug where rebalancing could lead to inconsistency in buckets. As part 21 | of the same bugfix, all updates to the `_bucket` space are now severely 22 | restricted and validated (gh-377, gh-173). 23 | 24 | * `vshard.router/storage.cfg{}` can no longer be called from multiple fibers at 25 | the same time (gh-140). 26 | 27 | * Fixed a bug where certain router error objects didn't have the `router_name` field. 28 | -------------------------------------------------------------------------------- /changelogs/0.1.23.md: -------------------------------------------------------------------------------- 1 | Date: 05-04-2023 2 | 3 | Tag: 0.1.23 4 | 5 | ## Compatibility 6 | 7 | VShard 0.1.23 is fully compatible with the previous VShard versions. 8 | 9 | ## Functionality added or changed 10 | 11 | * The new key `require('vshard')._VERSION` stores the current VShard version as a 12 | string. 13 | 14 | ## Bugs fixed 15 | 16 | * Fixed a bug where deleting a space didn't work unless it had been truncated 17 | first (gh-400). 18 | -------------------------------------------------------------------------------- /changelogs/0.1.24.md: -------------------------------------------------------------------------------- 1 | Date: 24-05-2023 2 | 3 | Tag: 0.1.24 4 | 5 | ## Compatibility 6 | 7 | VShard 0.1.24 is fully compatible with the previous VShard versions. 8 | 9 | ## Functionality added or changed 10 | 11 | * Added support for Tarantool 3.0 (gh-402). 12 | 13 | ## Bugs fixed 14 | 15 | * Fixed some router reload bugs (gh-141). 16 | * Fixed a bug that could duplicate buckets, especially when 17 | `vshard.storage.bucket_send()` is used manually (gh-414).
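The APIs mentioned in the changelogs above can be tied together in a short usage sketch. This is not code from the repository: it assumes a storage already configured via `vshard.storage.cfg()`, and the trigger callback parameter names and the shape of `info.services` are illustrative assumptions, not documented signatures.

local log = require('log')
local vshard = require('vshard')

-- 0.1.23: the module reports its own version as a string.
log.info('running vshard %s', vshard._VERSION)

-- 0.1.22 (gh-372): react to bucket events such as move or GC. The
-- callback parameter names here are assumptions for illustration.
vshard.storage.on_bucket_event(function(event, bucket_id)
    log.info('bucket %s got event %s', bucket_id, event)
end)

-- 0.1.22 (gh-107): ask for info about background services. The exact
-- set of services (discovery, rebalancer, gc, ...) depends on the node,
-- so only the service names are printed here.
local info = vshard.storage.info({with_services = true})
for name in pairs(info.services or {}) do
    log.info('background service: %s', name)
end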
18 | -------------------------------------------------------------------------------- /cmake/FindMsgPuck.cmake: -------------------------------------------------------------------------------- 1 | # - Find libmsgpuck header-only library 2 | # The module defines the following variables: 3 | # 4 | # MSGPUCK_FOUND - true if MsgPuck was found 5 | # MSGPUCK_INCLUDE_DIRS - the directory of the MsgPuck headers 6 | # MSGPUCK_LIBRARIES - the MsgPuck static library needed for linking 7 | # 8 | 9 | find_path(MSGPUCK_INCLUDE_DIR msgpuck.h PATH_SUFFIXES msgpuck) 10 | find_library(MSGPUCK_LIBRARY NAMES libmsgpuck.a) 11 | 12 | include(FindPackageHandleStandardArgs) 13 | find_package_handle_standard_args(MsgPuck 14 | REQUIRED_VARS MSGPUCK_INCLUDE_DIR MSGPUCK_LIBRARY) 15 | set(MSGPUCK_INCLUDE_DIRS ${MSGPUCK_INCLUDE_DIR}) 16 | set(MSGPUCK_LIBRARIES ${MSGPUCK_LIBRARY}) 17 | mark_as_advanced(MSGPUCK_INCLUDE_DIR MSGPUCK_INCLUDE_DIRS 18 | MSGPUCK_LIBRARY MSGPUCK_LIBRARIES) 19 | -------------------------------------------------------------------------------- /cmake/FindTarantool.cmake: -------------------------------------------------------------------------------- 1 | # Define GNU standard installation directories 2 | include(GNUInstallDirs) 3 | 4 | macro(extract_definition name output input) 5 | string(REGEX MATCH "#define[\t ]+${name}[\t ]+\"([^\"]*)\"" 6 | _t "${input}") 7 | string(REGEX REPLACE "#define[\t ]+${name}[\t ]+\"(.*)\"" "\\1" 8 | ${output} "${_t}") 9 | endmacro() 10 | 11 | find_path(TARANTOOL_INCLUDE_DIR tarantool/module.h 12 | HINTS ${TARANTOOL_DIR} ENV TARANTOOL_DIR 13 | PATH_SUFFIXES include 14 | ) 15 | 16 | if(TARANTOOL_INCLUDE_DIR) 17 | set(_config "-") 18 | file(READ "${TARANTOOL_INCLUDE_DIR}/tarantool/module.h" _config0) 19 | string(REPLACE "\\" "\\\\" _config ${_config0}) 20 | unset(_config0) 21 | extract_definition(PACKAGE_VERSION TARANTOOL_VERSION ${_config}) 22 | extract_definition(INSTALL_PREFIX _install_prefix ${_config}) 23 | unset(_config) 24 | endif() 25 | 26 | include(FindPackageHandleStandardArgs) 27 | find_package_handle_standard_args(TARANTOOL 28 | REQUIRED_VARS TARANTOOL_INCLUDE_DIR VERSION_VAR TARANTOOL_VERSION) 29 | if(TARANTOOL_FOUND) 30 | set(TARANTOOL_INCLUDE_DIRS "${TARANTOOL_INCLUDE_DIR}" 31 | "${TARANTOOL_INCLUDE_DIR}/tarantool/" 32 | CACHE PATH "Include directories for Tarantool") 33 | set(TARANTOOL_INSTALL_LIBDIR "${CMAKE_INSTALL_LIBDIR}/tarantool" 34 | CACHE PATH "Directory for storing Lua modules written in Lua") 35 | set(TARANTOOL_INSTALL_LUADIR "${CMAKE_INSTALL_DATADIR}/tarantool" 36 | CACHE PATH "Directory for storing Lua modules written in C") 37 | 38 | if (NOT TARANTOOL_FIND_QUIETLY AND NOT FIND_TARANTOOL_DETAILS) 39 | set(FIND_TARANTOOL_DETAILS ON CACHE INTERNAL "Details about TARANTOOL") 40 | message(STATUS "Tarantool LUADIR is ${TARANTOOL_INSTALL_LUADIR}") 41 | message(STATUS "Tarantool LIBDIR is ${TARANTOOL_INSTALL_LIBDIR}") 42 | endif () 43 | endif() 44 | mark_as_advanced(TARANTOOL_INCLUDE_DIRS TARANTOOL_INSTALL_LIBDIR 45 | TARANTOOL_INSTALL_LUADIR) 46 | -------------------------------------------------------------------------------- /debian/.gitignore: -------------------------------------------------------------------------------- 1 | tarantool-shard/ 2 | files 3 | stamp-* 4 | *.substvars 5 | *.log 6 | -------------------------------------------------------------------------------- /debian/changelog: -------------------------------------------------------------------------------- 1 | tarantool-vshard (0.1.0-1) unstable; urgency=medium 2 | 3 | * Initial 
release. 4 | 5 | -- Roman Tsisyk Fri, 22 Dec 2017 13:00:00 +0300 6 | -------------------------------------------------------------------------------- /debian/compat: -------------------------------------------------------------------------------- 1 | 9 2 | -------------------------------------------------------------------------------- /debian/control: -------------------------------------------------------------------------------- 1 | Source: tarantool-vshard 2 | Priority: optional 3 | Section: database 4 | Maintainer: Vladislav Shpilevoy 5 | Build-Depends: debhelper (>= 9), 6 | tarantool (>= 1.9.0), 7 | tarantool-dev (>= 1.9.0), 8 | Standards-Version: 3.9.6 9 | Homepage: https://github.com/tarantool/vshard 10 | Vcs-Git: git://github.com/tarantool/vshard.git 11 | Vcs-Browser: https://github.com/tarantool/vshard 12 | 13 | Package: tarantool-vshard 14 | Architecture: all 15 | Depends: tarantool (>= 1.9.0), ${misc:Depends} 16 | Description: The new generation of sharding based on virtual buckets 17 | The new generation of sharding based on virtual buckets. 18 | -------------------------------------------------------------------------------- /debian/copyright: -------------------------------------------------------------------------------- 1 | Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ 2 | Debianized-By: Roman Tsisyk 3 | Upstream-Name: tarantool-shard 4 | Upstream-Contact: support@tarantool.org 5 | Source: https://github.com/tarantool/shard 6 | 7 | Files: * 8 | Copyright: 2015-2016 Tarantool AUTHORS 9 | License: BSD-2-Clause 10 | Redistribution and use in source and binary forms, with or without 11 | modification, are permitted provided that the following conditions 12 | are met: 13 | 1. Redistributions of source code must retain the above copyright 14 | notice, this list of conditions and the following disclaimer. 15 | 2. Redistributions in binary form must reproduce the above copyright 16 | notice, this list of conditions and the following disclaimer in the 17 | documentation and/or other materials provided with the distribution. 18 | . 19 | THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 20 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 | ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 23 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 | OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 | HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 | LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 | OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 | SUCH DAMAGE. 
30 | -------------------------------------------------------------------------------- /debian/docs: -------------------------------------------------------------------------------- 1 | README.md 2 | -------------------------------------------------------------------------------- /debian/prebuild.sh: -------------------------------------------------------------------------------- 1 | curl -s https://packagecloud.io/install/repositories/tarantool/1_10/script.deb.sh | sudo bash 2 | -------------------------------------------------------------------------------- /debian/rules: -------------------------------------------------------------------------------- 1 | #!/usr/bin/make -f 2 | 3 | DEB_CMAKE_EXTRA_FLAGS := -DCMAKE_INSTALL_LIBDIR=lib/$(DEB_HOST_MULTIARCH) \ 4 | -DCMAKE_BUILD_TYPE=RelWithDebInfo 5 | # temporarily disable tests - Debian has old gevent 6 | DEB_MAKE_CHECK_TARGET := 7 | 8 | include /usr/share/cdbs/1/rules/debhelper.mk 9 | include /usr/share/cdbs/1/class/cmake.mk 10 | -------------------------------------------------------------------------------- /debian/source/format: -------------------------------------------------------------------------------- 1 | 3.0 (quilt) 2 | -------------------------------------------------------------------------------- /example/.tarantoolctl: -------------------------------------------------------------------------------- 1 | local workdir = './data/' 2 | local fio = require('fio') 3 | if not fio.stat('./data') then 4 | fio.mkdir('./data') 5 | end 6 | 7 | default_cfg = { 8 | pid_file = workdir, 9 | wal_dir = workdir, 10 | snap_dir = workdir, 11 | vinyl_dir = workdir, 12 | logger = workdir, 13 | } 14 | 15 | instance_dir = "." 16 | 17 | -- vim: set ft=lua ts=4 sts=4 sw=4 et: 18 | -------------------------------------------------------------------------------- /example/Makefile: -------------------------------------------------------------------------------- 1 | all: stop clean start enter 2 | 3 | start: 4 | tarantoolctl start storage_1_a 5 | tarantoolctl start storage_1_b 6 | tarantoolctl start storage_2_a 7 | tarantoolctl start storage_2_b 8 | tarantoolctl start router_1 9 | @echo "Waiting for the cluster to start" 10 | @sleep 1 11 | echo "vshard.router.bootstrap()" | tarantoolctl enter router_1 12 | 13 | stop: 14 | tarantoolctl stop storage_1_a 15 | tarantoolctl stop storage_1_b 16 | tarantoolctl stop storage_2_a 17 | tarantoolctl stop storage_2_b 18 | tarantoolctl stop router_1 19 | 20 | enter: 21 | tarantoolctl enter router_1 22 | 23 | logcat: 24 | tail -f data/*.log 25 | 26 | clean: 27 | rm -rf data/ 28 | 29 | load: 30 | tarantoolctl start generate_load.lua 31 | 32 | .PHONY: all start stop enter logcat clean load 33 | -------------------------------------------------------------------------------- /example/localcfg.lua: -------------------------------------------------------------------------------- 1 | return { 2 | sharding = { 3 | ['cbf06940-0790-498b-948d-042b62cf3d29'] = { -- replicaset #1 4 | replicas = { 5 | ['8a274925-a26d-47fc-9e1b-af88ce939412'] = { 6 | uri = 'storage:storage@127.0.0.1:3301', 7 | name = 'storage_1_a', 8 | master = true 9 | }, 10 | ['3de2e3e1-9ebe-4d0d-abb1-26d301b84633'] = { 11 | uri = 'storage:storage@127.0.0.1:3302', 12 | name = 'storage_1_b' 13 | } 14 | }, 15 | }, -- replicaset #1 16 | ['ac522f65-aa94-4134-9f64-51ee384f1a54'] = { -- replicaset #2 17 | replicas = { 18 | ['1e02ae8a-afc0-4e91-ba34-843a356b8ed7'] = { 19 | uri = 'storage:storage@127.0.0.1:3303', 20 | name = 'storage_2_a', 21 | master = true 22 | }, 23 |
['001688c3-66f8-4a31-8e19-036c17d489c2'] = { 24 | uri = 'storage:storage@127.0.0.1:3304', 25 | name = 'storage_2_b' 26 | } 27 | }, 28 | }, -- replicaset #2 29 | }, -- sharding 30 | replication_connect_quorum = 0, 31 | } 32 | -------------------------------------------------------------------------------- /example/router.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | 3 | require('strict').on() 4 | fiber = require('fiber') 5 | 6 | -- Check if we are running under test-run 7 | if os.getenv('ADMIN') then 8 | test_run = require('test_run').new() 9 | require('console').listen(os.getenv('ADMIN')) 10 | end 11 | 12 | replicasets = {'cbf06940-0790-498b-948d-042b62cf3d29', 13 | 'ac522f65-aa94-4134-9f64-51ee384f1a54'} 14 | 15 | -- Call a configuration provider 16 | cfg = dofile('localcfg.lua') 17 | if arg[1] == 'discovery_disable' then 18 | cfg.discovery_mode = 'off' 19 | end 20 | 21 | if not os.getenv('ADMIN') then 22 | cfg.listen = 3305 23 | end 24 | -- Start the database with sharding 25 | vshard = require('vshard') 26 | vshard.router.cfg(cfg) 27 | if not os.getenv('ADMIN') then 28 | -- Allow load generator to execute arbitrary functions. 29 | box.schema.user.grant('guest', 'super', nil, nil, {if_not_exists = true}) 30 | end 31 | -------------------------------------------------------------------------------- /example/router_1.lua: -------------------------------------------------------------------------------- 1 | router.lua -------------------------------------------------------------------------------- /example/storage_1_a.lua: -------------------------------------------------------------------------------- 1 | storage.lua -------------------------------------------------------------------------------- /example/storage_1_b.lua: -------------------------------------------------------------------------------- 1 | storage.lua -------------------------------------------------------------------------------- /example/storage_2_a.lua: -------------------------------------------------------------------------------- 1 | storage.lua -------------------------------------------------------------------------------- /example/storage_2_b.lua: -------------------------------------------------------------------------------- 1 | storage.lua -------------------------------------------------------------------------------- /example/vshard: -------------------------------------------------------------------------------- 1 | ../vshard -------------------------------------------------------------------------------- /rpm/prebuild.sh: -------------------------------------------------------------------------------- 1 | curl -s https://packagecloud.io/install/repositories/tarantool/1_10/script.rpm.sh | sudo bash 2 | sudo yum -y install python-devel python-pip 3 | sudo pip install tarantool msgpack 4 | -------------------------------------------------------------------------------- /rpm/tarantool-vshard.spec: -------------------------------------------------------------------------------- 1 | Name: tarantool-vshard 2 | Version: 0.1.0 3 | Release: 1%{?dist} 4 | Summary: The new generation of sharding based on virtual buckets 5 | Group: Applications/Databases 6 | License: BSD 7 | URL: https://github.com/tarantool/vshard 8 | Source0: https://github.com/tarantool/vshard/archive/%{version}/vshard-%{version}.tar.gz 9 | BuildArch: noarch 10 | BuildRequires: cmake >= 2.8 11 | BuildRequires: gcc >= 4.5 12 | BuildRequires: tarantool >= 1.9.0 13 | BuildRequires: 
tarantool-devel >= 1.9.0 14 | Requires: tarantool >= 1.9.0 15 | 16 | # For tests 17 | %if (0%{?fedora} >= 22 || 0%{?rhel} >= 7) 18 | BuildRequires: python >= 2.7 19 | BuildRequires: python-six >= 1.9.0 20 | BuildRequires: python-gevent >= 1.0 21 | BuildRequires: python-yaml >= 3.0.9 22 | %endif 23 | 24 | %description 25 | The new generation of sharding based on virtual buckets. 26 | 27 | %prep 28 | %setup -q -n vshard-%{version} 29 | 30 | %build 31 | %cmake . -DCMAKE_BUILD_TYPE=RelWithDebInfo 32 | make %{?_smp_mflags} 33 | 34 | %check 35 | %if (0%{?fedora} >= 22 || 0%{?rhel} >= 7) 36 | make test 37 | %endif 38 | 39 | %install 40 | %make_install 41 | 42 | %files 43 | # %{_libdir}/tarantool/vshard/ 44 | %{_datarootdir}/tarantool/vshard/ 45 | %doc README.md 46 | %{!?_licensedir:%global license %doc} 47 | %license LICENSE 48 | 49 | %changelog 50 | * Fri Dec 22 2017 Roman Tsisyk 0.1.0-1 51 | - Initial version 52 | -------------------------------------------------------------------------------- /sharding_arch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tarantool/vshard/c83deb1983757df1b2ab030776a72bea919cf25b/sharding_arch.png -------------------------------------------------------------------------------- /test/.tarantoolctl: -------------------------------------------------------------------------------- 1 | -- Options for test-run tarantoolctl 2 | local workdir = os.getenv('TEST_WORKDIR') 3 | default_cfg = { 4 | pid_file = workdir, 5 | wal_dir = workdir, 6 | snap_dir = workdir, 7 | vinyl_dir = workdir, 8 | logger = workdir, 9 | background = false, 10 | } 11 | 12 | instance_dir = workdir 13 | 14 | -- vim: set ft=lua : 15 | -------------------------------------------------------------------------------- /test/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_custom_target(test 2 | COMMAND ${PROJECT_SOURCE_DIR}/test/test-run.py -j -1 3 | --builddir=${PROJECT_BINARY_DIR} 4 | --vardir=${PROJECT_BINARY_DIR}/test/var) 5 | 6 | add_custom_target(test-force 7 | COMMAND ${PROJECT_SOURCE_DIR}/test/test-run.py -j -1 8 | --builddir=${PROJECT_BINARY_DIR} 9 | --vardir=${PROJECT_BINARY_DIR}/test/var 10 | --force) 11 | -------------------------------------------------------------------------------- /test/certs/ca.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIFjTCCA3WgAwIBAgIUUvIlnJRQ9UlJ2kJzyTRUenIWcEUwDQYJKoZIhvcNAQEL 3 | BQAwVTEQMA4GA1UECwwHVW5rbm93bjEQMA4GA1UECgwHVW5rbm93bjEQMA4GA1UE 4 | BwwHVW5rbm93bjEQMA4GA1UECAwHdW5rbm93bjELMAkGA1UEBhMCQVUwIBcNMjEx 5 | MjI4MTE1ODQ5WhgPMjEyMTEyMDQxMTU4NDlaMFUxEDAOBgNVBAsMB1Vua25vd24x 6 | EDAOBgNVBAoMB1Vua25vd24xEDAOBgNVBAcMB1Vua25vd24xEDAOBgNVBAgMB3Vu 7 | a25vd24xCzAJBgNVBAYTAkFVMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKC 8 | AgEA05mQ1Vd1BXKLoYsatqGRTAyfod7oRFIfDxwR49YcylsqlzYhH8iOMw0d1zZa 9 | PfU0SasvZm07qVzlf/jXfNIgVvRZhqVevAWScghUYFoSeBy6eK+OSxso8TBOGnvs 10 | U1lmbC9rM/ZvdnRa+qBrl6St+G0z9ry9jsX2Wf6ElM8zo53k0T/zT160GHrWMBlV 11 | REnCqJjwJ4P9bgOMSyYKLVje3V4ZinhRZCz4iypfPz0uZpG4K3otp9V4wfZMQbO9 12 | /+OMj7j2s+nGLza1v4RxyQcagkoTcIJK9F5vcimhb05QGcevAPcu1aAgUbEgqM2w 13 | vqrHFZ0kis0+nzvgWfAGzHI/nO65A1adxw09Jt1zptsBgJX6QnODmP5URS6AmSBp 14 | HXMJvySIQ2yLwchV5qJxDj/w6h4e3C9EUJdjdy8Wz1ffCkm5ebDR+rZgZMuIQxCa 15 | EDfjrVAggbXookx6gTOjtJJK0OhwOxI6Kq0T1LQULr86tJAsfULIOVZEi4kVWD7z 16 | FBGJFp9nacAh0EqomI1f5znXXfKDzF1nGPKlnVylKGs+TTDB374FmInzqnkbUYHq 17 | 
ARVJQKY2vG69WLkZqbiLcnxZGAJ+g96vRB0NRC4GodqzwIFOd4AY70fweOjDN67s 18 | /MBDpW5L8RafADxAm8p1HbePvmhi36zkL8IamuxDMHstjGMCAwEAAaNTMFEwHQYD 19 | VR0OBBYEFGZSXM5z+yfXZqPTz5JQDyliwK+GMB8GA1UdIwQYMBaAFGZSXM5z+yfX 20 | ZqPTz5JQDyliwK+GMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggIB 21 | AIpBT6Y+tPKT2iLu/cXsr8U1UXZLU+H9/Pb6Yu5rFGpNtKDJWJG7KCLjfgs5gPvR 22 | k2/TC6bS9Wg49RXcmXKRH6uLIltRt7+IQEG/kkYQUvKGefMeDl8yfLLWkjhIiGnx 23 | xdWAc3DqZpjHcngCkrNwr4KDHmf5KEbAeaan9HrUieBC9qnH0vwOsbjpxAP7cGEF 24 | IzjNI7dfkoWfaFOsy4EpB2zlG019neQSdHn5kkzTvtaPfBHIJQoixMe2FHoYv1BR 25 | 71otNzLfgdaAMKjrAXUM6oqvPOR8q7TjuPcE1wmYrDH6HZGEG+251JfeVVkIXe6O 26 | bhL6xp/czTNGvZqQ4slF/ARaY3ZpLDeLcdUuFzujrn6IxqPrPJbmQWLEn5UPjyJb 27 | GrJFrMdvR4ruAXG61cF876CvijSHghA4yxJeAquDsB70bYh/NwfnM2/eortXR8aH 28 | qO2QZeGFWTxe1J56emwFonqrfQRNf6j/PpyyXHGSkcRHzbFqBsSdeL48VNkx0PzK 29 | nL2xOeCQqI0dFn+BOaU+Ud4+kqcjPxuHwxjTodB2GCGpqPpiIHKeXuc3BpA0LTca 30 | C/+4QqhZ3D2JHzLk9RCW4gpShQmr3HtTqvY0ykB6oLrxpPZTuUtjFXKNcc5tetvJ 31 | iXdPxKJ1lk5pS7zYT6wSvxTMRzg3jE/fqIrw/lJtXP86 32 | -----END CERTIFICATE----- 33 | -------------------------------------------------------------------------------- /test/certs/server.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIFSjCCAzICFBXGln0k+aZc9YBYLwvGmLLUjkFPMA0GCSqGSIb3DQEBCwUAMFUx 3 | EDAOBgNVBAsMB1Vua25vd24xEDAOBgNVBAoMB1Vua25vd24xEDAOBgNVBAcMB1Vu 4 | a25vd24xEDAOBgNVBAgMB3Vua25vd24xCzAJBgNVBAYTAkFVMCAXDTIxMTIyODEx 5 | NTg1MFoYDzIxMjExMjA0MTE1ODUwWjBsMRUwEwYDVQQDDAx0YXJhbnRvb2wuaW8x 6 | EDAOBgNVBAsMB1Vua25vd24xEDAOBgNVBAoMB1Vua25vd24xEDAOBgNVBAcMB1Vu 7 | a25vd24xEDAOBgNVBAgMB3Vua25vd24xCzAJBgNVBAYTAkFVMIICIjANBgkqhkiG 8 | 9w0BAQEFAAOCAg8AMIICCgKCAgEAl9/UWRYbdrhEzonRLqlatx+YSOaacFAKw5F9 9 | uwLv6sPsgOAsWUifnmkb5VNjgbYDkEtJn6B0o5ShFwkFj2nFZhnkaTGhgHQbhkY0 10 | S1mfEGa7A2ZG2xb2T5fxbAaOw9og9N+wGnbrOS8F/MKSJJk3qe2ap5P+LqAySGzp 11 | xT6/Uwi9a7/X8qwjaHRPC87gcMAIk1a1ZhxFvDQMnlSkL12FAqbIUH5oNaF1CWpX 12 | je8WNvHWeDVHDy8kV2soQQtRGUCUNaOFvIo5xHr/FVk8MW9evw/GLCYZz9bqiFav 13 | nJniHikHcOwMW5qz8ihQqL8HaohKlDyiZOS2uuFAV6ZOJXEj0K3rcYlP3Oj2f4OY 14 | cuKIkkmoUlL2Ub7qes0T0OZqHzmXLON6G7bvRT5KK+vc4JFCrODyMSRTInqyz9rX 15 | 06hiBZt5UCA7rIIXfBLPuT+SKfpuoZuZyG3q7bz9PC9hY3/0fmePgHBcKsdxIcTX 16 | gFcwYgc/KovcjD8pe6uuJPm7Cjt4+FfYh3+NE5sM9Y1DwHQXgJLwFEjo90L8HXVN 17 | +960PZrLCHJqnU4TBiGfDOhK1EjAuPWOMshlNiIkaeksiprH+KnELG795yDh/Nma 18 | /mB0g8JidNJSAk8y9Q2b5FfTYrTrGYDb9QGanCi/Mdf8p0md7SpySGJURJVBGMWG 19 | hp0rj5ECAwEAATANBgkqhkiG9w0BAQsFAAOCAgEAYae+RckhUp+bZG4KQpnFD8n+ 20 | Q4ixo/KZJIF+6okhIR/QhVsCdPHrL5bCkFGkFAUNH6IBmg68v368jRPL4J3L3JJB 21 | RXVvEN+a3TFoD3L2HVB+ZZs4ykCDLlzoGR7pkL32iqdUCe1mKEDIZTgBcCHePmM4 22 | QXeGhNYm5KD/qJYec9MjlhHmm6ofqlE9V6Lg5XqWYMfpBN74GFgA5GdtiN4XWMt9 23 | 00Sb0SkusK/vU6wb8VM+yotWsZgiUnn69rjxOAaUiC1iCGfsxg+ChBISyBHvYdgs 24 | 7FwrFGtfnBgh47CoSENrQ4hBmsjmsm44F0ZLQ5uQ4IXZZtgaZDpN1IcdwU766cUF 25 | UJ9iTb5IfFpSv+f8c+YvLMmSQkVCA92tbW9IchOcJs49V6+ltNDXhKFSueDjIuVb 26 | UHCWeF38XH+LP6Rc3Bxw7nlcdOgtWdVSm8jPzjKKkauvtvfVneaXZyiQSNWcxuqh 27 | 6V09LTvt8xRZgfv+Z/DSA+rV4QKKzsQTpSxnaqxJ5mn/oD7IbRZnCdZw2PkqsNHT 28 | SB84iOAKluHEaPygiGlwkOVXm5fwEC4+Q46flhRuipp9n54A8L+J4KburP7V5K7x 29 | +hkKmWcWmlXku+4kIFAODSVin8+JOQLxgrQkkweP5sv6+6hQhAK8F+GWkNTWAK2F 30 | YGWr/YtBFKXm9dh5xKY= 31 | -----END CERTIFICATE----- 32 | -------------------------------------------------------------------------------- /test/certs/server.key: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | 
MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQCX39RZFht2uETO 3 | idEuqVq3H5hI5ppwUArDkX27Au/qw+yA4CxZSJ+eaRvlU2OBtgOQS0mfoHSjlKEX 4 | CQWPacVmGeRpMaGAdBuGRjRLWZ8QZrsDZkbbFvZPl/FsBo7D2iD037Aadus5LwX8 5 | wpIkmTep7Zqnk/4uoDJIbOnFPr9TCL1rv9fyrCNodE8LzuBwwAiTVrVmHEW8NAye 6 | VKQvXYUCpshQfmg1oXUJaleN7xY28dZ4NUcPLyRXayhBC1EZQJQ1o4W8ijnEev8V 7 | WTwxb16/D8YsJhnP1uqIVq+cmeIeKQdw7AxbmrPyKFCovwdqiEqUPKJk5La64UBX 8 | pk4lcSPQretxiU/c6PZ/g5hy4oiSSahSUvZRvup6zRPQ5mofOZcs43obtu9FPkor 9 | 69zgkUKs4PIxJFMierLP2tfTqGIFm3lQIDusghd8Es+5P5Ip+m6hm5nIbertvP08 10 | L2Fjf/R+Z4+AcFwqx3EhxNeAVzBiBz8qi9yMPyl7q64k+bsKO3j4V9iHf40Tmwz1 11 | jUPAdBeAkvAUSOj3QvwddU373rQ9mssIcmqdThMGIZ8M6ErUSMC49Y4yyGU2IiRp 12 | 6SyKmsf4qcQsbv3nIOH82Zr+YHSDwmJ00lICTzL1DZvkV9NitOsZgNv1AZqcKL8x 13 | 1/ynSZ3tKnJIYlRElUEYxYaGnSuPkQIDAQABAoICAAY0uUKiqqChSv59lzSm6vQM 14 | sHZaJfls6+Ot8EtSBWe8dc+WWXmUhCE+/krSq4BDZcXhrbJ+JgYBLsv4LGf91Hkl 15 | mbXKt2S+WdmUyNJDaUKMUBwFc+PD/nCmLtFRO0g5g5s+AL40GcyOZVo8wSgJssKY 16 | 2Pb8DyjsluFqweqzc7H663KZtkNOUWwVRJ2g6H7y+ea7ZlIz2Y2lOtbFcqgU1DiV 17 | hB317GA0psmptMYx/OBl5XOoCa1PKdcAo/Z0de3oO6YtdmuL4mxq8j13FgSXHnIE 18 | PP7U170sdQz5LdfiWeENoCY3HxInMi81AobCt5Tc2N9R0vj4d5cPWUe7nfnds67l 19 | LwuAhOHq1ppVyB9k7R8UwGo+sZW6TPHKmAyZ/2RRilXkZlwJo+DlI/jJAT1oxjk3 20 | PceQsOW5B1/JXvax37G8yQYS1YEv3ZWeJ/ePwFJ7rwWQE3KLya9AxDUfI8b07hdP 21 | T1IQ+MNH2Vnyc7ygsYk98MPaOBn6H1/A+kKlVve9lwqOxbc0y4NmcsKSyRRq9X3y 22 | OEaXUlzVA/La1gSNdBt2Zo5oSRGrK7SyzYtiosq1Q7fwNC7YI8G648DIS/ew8+nH 23 | CRrbgI6JacWoPPHf9DA4nc0oP+B+kF+5Ru8BZyx4tKFE+GJS3Z/LVdIa6+/sIrnW 24 | rMLbhwk6Im9tOszFe1wBAoIBAQDGMU5YwHCt2VgRZnM9Q0S7jaSYE0RGfInFx+ze 25 | +H+qJ045MSyNOtfOTzXJIQ9XxAnyH3q8LLydHBgFtdHxt1Z4fxGUKBaxDWsrhEi6 26 | UtAV76Av/VIuV8u8DOulOOsCv4hD38fO04+6jHnHMoF0XLH0SJjQCM7djXVA+7Wn 27 | tApmqrjcbAtbHLx5YJ2O3+F2qKtdGPU/eNbWgM5nU908dT1qjpGrNah5bIG6X0/W 28 | o8xZxVjVB1dayRViMOHONSDJBJ/PZCjMDjGVbslDLmK53q6ElzbugaoGXUvi4AAc 29 | kVA5qIJiZXJlHNqs+utfWNjM7g/yVCTIJR9K5CimmkU1qCexAoIBAQDELAUGFHKR 30 | GTfZ92IBwZL34Qkz7MNM9zSP0cyCL9MXf/vYuCnbhGNl9KjNdi96iJRDO5jrQqdz 31 | LwQ8flgM2uaI/7Tn4wWHvaPzkgPDAxrM1hFgCxmYdHF+mbem8QRe5itYxrktNl71 32 | ggXh89pT/oUbX+7waHcAvU2yMyPW8IbyjmCTS8s+wQC37mWciQme/rXiFDlEnWxi 33 | dx12SC6+epBLkv6hyL+oWtHJHR2Z88KG72gqExaqWZtuLCeNCJ8SnQovLY0KzqFI 34 | H1vtW2Pt5GTv+nyYMilmxJ4chfMcpqYW+PxLncVoTVKAxj2bDLAUz2mbq0e1iL2O 35 | E19shfhYA73hAoIBAQCt0W5Z7iSkg9p2IDjvQMDWEIqLCVK20uBtwkJe5ufoTeKP 36 | zC27iyDbgqK0S7dODNOAMtWMlVuyGoKC94wyBs3ijebX3jTQgOk92B2Es75ljqwZ 37 | inz5wewfd+FvHTUXSu1yQZ/WJ7uFK1BeQFBCwk0b5lOHGEJ9EmQEYv1VIFSC6DWK 38 | s4NeHBy8At0MAhOV4qWv14S3bv5UT9E109pS/KupeXIOCtg/7cb0skSuBSfij26y 39 | HmchavvDCwXuuK+RPOn/lGMnW0dnPCttLRBO3RqHlKsLkFKSlF5KkpPmehc3Ci+X 40 | pksmnKC4VrP5Ak3ZpOv9Zoh/s/b20pVIUEJkpZ5xAoIBAFLmQ005cNzOamLK/vi9 41 | A82KpKfzwuiQgv7P6vuNGEiTBiukT9P7U0HvrTH+YrwGYZkz40BwD3FGiiTCQ5VT 42 | IB7+6EUiE+E0C8fPiZVsh3POdtM+fZeGhbu5W/nJWk58OA8xE2n+0wwu4SuGMFpU 43 | cY2K8d9YVgnS9bV7lQas6GoyLq+wVPh0+UfBD+Ghq5YsD1K1tym+2OGXcrTcdhx6 44 | kOXO/ZoCyrNrhcHWcv7vGuKI41z8ahSc/2kEccZQwWBNStN7DWJmxZND5UYd29HZ 45 | NaqQEP0EHajXJ/XnyT/vULreh0exKY4VNutCWyBdsT9qEzf8nrYhTfFvUGD9EGCp 46 | XCECggEAI0OCpVa73fUK2WH63dk1n1McIkbm15EnIhS/n8u1REFJiBXS3VODlVZv 47 | Meom4MBrhHIavxVHUy18k9oD5ghk3a40l5EugsHJ/taTazjQp4u1WwHBgjdtbQ05 48 | J3kCx7np7cNinxDObbYyJbKa1fvi+5QwNAoZeXd10s4TB+4iEtlX1phl7+Q/3LpM 49 | iNIorh3Sybl0CsKOomTTssbHmxFE5hAGOPWmEhf7e2YECZi2btsAt5H6WX0mgHmo 50 | uRwQwMZleF9HeSuTQWnhK9WqEj1vcNaYChvGAEkBZnYhjjVIUFFEkIsF3iDHElmB 51 | vKS6Rdd9zvgOfxk6xh/fG0ri5TtW5Q== 52 | -----END PRIVATE KEY----- 53 | 
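The key and certificates above are test fixtures. To show where such files plug in: since vshard 0.1.20 (gh-325) a config URI may be anything accepted by `box.cfg.listen` and `netbox.connect()`, including a table. The replica entry below is a hypothetical sketch for SSL in Tarantool Enterprise; the `params` field names (`transport`, `ssl_cert_file`, `ssl_key_file`, `ssl_ca_file`) follow the Tarantool Enterprise URI table format and are an assumption, not something defined by this repository.

-- Hypothetical replica entry wiring the test certificates into an SSL
-- URI (Tarantool Enterprise only; parameter names are assumptions).
local replica = {
    uri = {
        uri = 'storage:storage@127.0.0.1:3301',
        params = {
            transport = 'ssl',
            ssl_cert_file = 'test/certs/server.crt',
            ssl_key_file = 'test/certs/server.key',
            ssl_ca_file = 'test/certs/ca.crt',
        },
    },
    name = 'storage_1_a',
    master = true,
}
-- Such a table would go into the `replicas` section of the sharding
-- config, in place of a plain URI string.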
-------------------------------------------------------------------------------- /test/failover/box_1_a.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | -- Get instance name 3 | require('strict').on() 4 | local fio = require('fio') 5 | NAME = fio.basename(arg[0], '.lua') 6 | test_run = require('test_run').new() 7 | require('console').listen(os.getenv('ADMIN')) 8 | 9 | vshard = require('vshard') 10 | names = dofile('names.lua') 11 | cfg = dofile('config.lua') 12 | cfg.weights = nil 13 | vshard.storage.cfg(cfg, names.replica_uuid[NAME]) 14 | 15 | box.once('schema', function() 16 | box.schema.func.create('echo') 17 | box.schema.role.grant('public', 'execute', 'function', 'echo') 18 | end) 19 | 20 | echo_count = 0 21 | 22 | function echo(...) 23 | echo_count = echo_count + 1 24 | return ... 25 | end 26 | -------------------------------------------------------------------------------- /test/failover/box_1_b.lua: -------------------------------------------------------------------------------- 1 | box_1_a.lua -------------------------------------------------------------------------------- /test/failover/box_1_c.lua: -------------------------------------------------------------------------------- 1 | box_1_a.lua -------------------------------------------------------------------------------- /test/failover/box_1_d.lua: -------------------------------------------------------------------------------- 1 | box_1_a.lua -------------------------------------------------------------------------------- /test/failover/box_2_a.lua: -------------------------------------------------------------------------------- 1 | box_1_a.lua -------------------------------------------------------------------------------- /test/failover/box_2_b.lua: -------------------------------------------------------------------------------- 1 | box_1_a.lua -------------------------------------------------------------------------------- /test/failover/box_2_c.lua: -------------------------------------------------------------------------------- 1 | box_1_a.lua -------------------------------------------------------------------------------- /test/failover/box_3_a.lua: -------------------------------------------------------------------------------- 1 | box_1_a.lua -------------------------------------------------------------------------------- /test/failover/box_3_b.lua: -------------------------------------------------------------------------------- 1 | box_1_a.lua -------------------------------------------------------------------------------- /test/failover/cluster_changes.test.lua: -------------------------------------------------------------------------------- 1 | test_run = require('test_run').new() 2 | 3 | REPLICASET_1 = { 'box_1_a', 'box_1_b', 'box_1_c', 'box_1_d' } 4 | REPLICASET_2 = { 'box_2_a', 'box_2_b', 'box_2_c' } 5 | REPLICASET_3 = { 'box_3_a', 'box_3_b' } 6 | 7 | test_run:create_cluster(REPLICASET_1, 'failover') 8 | test_run:create_cluster(REPLICASET_2, 'failover') 9 | test_run:create_cluster(REPLICASET_3, 'failover') 10 | util = require('util') 11 | util.wait_master(test_run, REPLICASET_1, 'box_1_a') 12 | util.wait_master(test_run, REPLICASET_2, 'box_2_a') 13 | util.wait_master(test_run, REPLICASET_3, 'box_3_b') 14 | 15 | test_run:cmd('create server router_1 with script="failover/router_1.lua"') 16 | test_run:cmd('start server router_1') 17 | test_run:switch('router_1') 18 | 19 | -- 20 | -- In a case of configuration change all replicaset objects are 21 
| -- recreated, replica and replica candidate connections are 22 | -- left in old objects, and are garbage collected. Test, that it 23 | -- does not affect new replicasets, and failover uses new weights 24 | -- and new topology. 25 | -- 26 | 27 | -- 28 | -- First test case: reverse weights, when only replica exists, 29 | -- and no replica candidate. 30 | -- 31 | vshard.router.cfg(cfg) 32 | wait_state('All replicas are ok') 33 | vshard.router.info().alerts 34 | 35 | reverse_weights() 36 | vshard.router.cfg(cfg) 37 | vshard.router.info() 38 | wait_state('All replicas are ok') 39 | vshard.router.info() 40 | 41 | test_run:switch('box_1_a') 42 | reverse_weights() 43 | vshard.storage.cfg(cfg, names.replica_uuid[NAME]) 44 | test_run:switch('box_1_b') 45 | reverse_weights() 46 | vshard.storage.cfg(cfg, names.replica_uuid[NAME]) 47 | test_run:switch('box_1_c') 48 | reverse_weights() 49 | vshard.storage.cfg(cfg, names.replica_uuid[NAME]) 50 | test_run:switch('box_1_d') 51 | reverse_weights() 52 | vshard.storage.cfg(cfg, names.replica_uuid[NAME]) 53 | test_run:switch('box_2_a') 54 | reverse_weights() 55 | vshard.storage.cfg(cfg, names.replica_uuid[NAME]) 56 | test_run:switch('box_2_b') 57 | reverse_weights() 58 | vshard.storage.cfg(cfg, names.replica_uuid[NAME]) 59 | test_run:switch('box_2_c') 60 | reverse_weights() 61 | vshard.storage.cfg(cfg, names.replica_uuid[NAME]) 62 | test_run:switch('box_3_a') 63 | reverse_weights() 64 | vshard.storage.cfg(cfg, names.replica_uuid[NAME]) 65 | test_run:switch('box_3_b') 66 | reverse_weights() 67 | vshard.storage.cfg(cfg, names.replica_uuid[NAME]) 68 | 69 | -- 70 | -- Test removal of candidate and replica. 71 | -- 72 | test_run:switch('router_1') 73 | remove_some_replicas() 74 | vshard.router.cfg(cfg) 75 | info = vshard.router.info() 76 | while #info.alerts ~= 6 do fiber.sleep(0.1) info = vshard.router.info() end 77 | info 78 | test_run:switch('box_1_b') 79 | remove_some_replicas() 80 | vshard.storage.cfg(cfg, names.replica_uuid[NAME]) 81 | test_run:switch('box_1_d') 82 | remove_some_replicas() 83 | vshard.storage.cfg(cfg, names.replica_uuid[NAME]) 84 | test_run:switch('box_2_b') 85 | remove_some_replicas() 86 | vshard.storage.cfg(cfg, names.replica_uuid[NAME]) 87 | 88 | -- 89 | -- Test addition of new replicas. 90 | -- 91 | test_run:switch('router_1') 92 | add_some_replicas() 93 | vshard.router.cfg(cfg) 94 | #vshard.router.info().alerts > 1 95 | wait_state('All replicas are ok') 96 | vshard.router.info() 97 | 98 | test_run:switch('default') 99 | test_run:cmd('stop server router_1') 100 | test_run:cmd('cleanup server router_1') 101 | 102 | test_run:drop_cluster(REPLICASET_1) 103 | test_run:drop_cluster(REPLICASET_2) 104 | test_run:drop_cluster(REPLICASET_3) 105 | -------------------------------------------------------------------------------- /test/failover/failover_errinj.result: -------------------------------------------------------------------------------- 1 | test_run = require('test_run').new() 2 | --- 3 | ... 4 | REPLICASET_1 = { 'box_1_a', 'box_1_b', 'box_1_c', 'box_1_d' } 5 | --- 6 | ... 7 | REPLICASET_2 = { 'box_2_a', 'box_2_b', 'box_2_c' } 8 | --- 9 | ... 10 | REPLICASET_3 = { 'box_3_a', 'box_3_b' } 11 | --- 12 | ... 13 | test_run:create_cluster(REPLICASET_1, 'failover') 14 | --- 15 | ... 16 | test_run:create_cluster(REPLICASET_2, 'failover') 17 | --- 18 | ... 19 | test_run:create_cluster(REPLICASET_3, 'failover') 20 | --- 21 | ... 22 | util = require('util') 23 | --- 24 | ... 25 | util.wait_master(test_run, REPLICASET_1, 'box_1_a') 26 | --- 27 | ... 
28 | util.wait_master(test_run, REPLICASET_2, 'box_2_a') 29 | --- 30 | ... 31 | util.wait_master(test_run, REPLICASET_3, 'box_3_b') 32 | --- 33 | ... 34 | test_run:cmd('create server router_1 with script="failover/router_1.lua"') 35 | --- 36 | - true 37 | ... 38 | test_run:cmd('start server router_1') 39 | --- 40 | - true 41 | ... 42 | test_run:switch('router_1') 43 | --- 44 | - true 45 | ... 46 | vshard.router.cfg(cfg) 47 | --- 48 | ... 49 | -- Check that already run failover step is restarted on 50 | -- configuration change (if some replicasets are removed from 51 | -- config). 52 | rs1 = vshard.router.static.replicasets[rs_uuid[1]] 53 | --- 54 | ... 55 | while not rs1.replica or not rs1.replica.conn:is_connected() do fiber.sleep(0.1) end 56 | --- 57 | ... 58 | vshard.router.internal.errinj.ERRINJ_FAILOVER_CHANGE_CFG = true 59 | --- 60 | ... 61 | wait_state('Configuration has changed, restart ') 62 | --- 63 | ... 64 | test_run:switch('default') 65 | --- 66 | - true 67 | ... 68 | test_run:cmd('stop server router_1') 69 | --- 70 | - true 71 | ... 72 | test_run:cmd('cleanup server router_1') 73 | --- 74 | - true 75 | ... 76 | test_run:drop_cluster(REPLICASET_1) 77 | --- 78 | ... 79 | test_run:drop_cluster(REPLICASET_2) 80 | --- 81 | ... 82 | test_run:drop_cluster(REPLICASET_3) 83 | --- 84 | ... 85 | -------------------------------------------------------------------------------- /test/failover/failover_errinj.test.lua: -------------------------------------------------------------------------------- 1 | test_run = require('test_run').new() 2 | 3 | REPLICASET_1 = { 'box_1_a', 'box_1_b', 'box_1_c', 'box_1_d' } 4 | REPLICASET_2 = { 'box_2_a', 'box_2_b', 'box_2_c' } 5 | REPLICASET_3 = { 'box_3_a', 'box_3_b' } 6 | 7 | test_run:create_cluster(REPLICASET_1, 'failover') 8 | test_run:create_cluster(REPLICASET_2, 'failover') 9 | test_run:create_cluster(REPLICASET_3, 'failover') 10 | util = require('util') 11 | util.wait_master(test_run, REPLICASET_1, 'box_1_a') 12 | util.wait_master(test_run, REPLICASET_2, 'box_2_a') 13 | util.wait_master(test_run, REPLICASET_3, 'box_3_b') 14 | 15 | test_run:cmd('create server router_1 with script="failover/router_1.lua"') 16 | test_run:cmd('start server router_1') 17 | test_run:switch('router_1') 18 | 19 | vshard.router.cfg(cfg) 20 | -- Check that already run failover step is restarted on 21 | -- configuration change (if some replicasets are removed from 22 | -- config). 
23 | rs1 = vshard.router.static.replicasets[rs_uuid[1]] 24 | while not rs1.replica or not rs1.replica.conn:is_connected() do fiber.sleep(0.1) end 25 | vshard.router.internal.errinj.ERRINJ_FAILOVER_CHANGE_CFG = true 26 | wait_state('Configuration has changed, restart ') 27 | 28 | test_run:switch('default') 29 | test_run:cmd('stop server router_1') 30 | test_run:cmd('cleanup server router_1') 31 | 32 | test_run:drop_cluster(REPLICASET_1) 33 | test_run:drop_cluster(REPLICASET_2) 34 | test_run:drop_cluster(REPLICASET_3) 35 | -------------------------------------------------------------------------------- /test/failover/names.lua: -------------------------------------------------------------------------------- 1 | rs_uuid = {'739fe4fb-2850-4cde-9637-10150724c5eb', 2 | '832bbba0-9699-4aa1-907d-c7c7af61f5c9', 3 | '971279ef-5c38-4d14-86ee-0e4a56567955'} 4 | 5 | replica_uuid = { 6 | box_1_a = '3e01062d-5c1b-4382-b14e-f80a517cb462', 7 | box_1_b = 'db778aec-267f-47bb-9347-49828232c8db', 8 | box_1_c = '7223fc89-1a0d-480b-a33e-a8d2b117b13d', 9 | box_1_d = '56bb8450-9526-442b-ba96-b96cc38ee2f9', 10 | box_2_a = '27ef9a48-86f3-4759-89be-17aeaf4bd6ba', 11 | box_2_b = 'e2ed64b7-5c3a-4878-9ced-b71a034bd67f', 12 | box_2_c = 'b7762398-e396-439c-9504-e5ab4f7e0ef6', 13 | box_3_a = 'dbcf1aaa-4e50-4753-bf06-aaba76297624', 14 | box_3_b = '7171703a-fec2-45c9-beac-9a8aa40dcf85', 15 | } 16 | 17 | return {rs_uuid = rs_uuid, replica_uuid = replica_uuid} 18 | -------------------------------------------------------------------------------- /test/failover/router_1.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | 3 | require('strict').on() 4 | test_run = require('test_run').new() 5 | 6 | local fio = require('fio') 7 | local name = fio.basename(arg[0], '.lua') 8 | cfg = dofile('config.lua') 9 | vshard = require('vshard') 10 | os = require('os') 11 | fiber = require('fiber') 12 | local names = dofile('names.lua') 13 | log = require('log') 14 | rs_uuid = names.rs_uuid 15 | replica_uuid = names.replica_uuid 16 | zone = nil 17 | if name == 'router_1' then 18 | zone = 1 19 | elseif name == 'router_2' then 20 | zone = 2 21 | elseif name == 'router_3' then 22 | zone = 3 23 | else 24 | zone = 4 25 | end 26 | cfg.zone = zone 27 | 28 | box.cfg{} 29 | 30 | function wait_state(state) 31 | log.info(string.rep('a', 1000)) 32 | while test_run:grep_log(name, state, 1000) == nil do 33 | fiber.sleep(0.1) 34 | end 35 | end 36 | 37 | function priority_order() 38 | local ret = {} 39 | for _, uuid in pairs(rs_uuid) do 40 | local rs = vshard.router.static.replicasets[uuid] 41 | local sorted = {} 42 | for _, replica in pairs(rs.priority_list) do 43 | local z 44 | if replica.zone == nil then 45 | z = 'unknown zone' 46 | else 47 | z = replica.zone 48 | end 49 | table.insert(sorted, z) 50 | end 51 | table.insert(ret, sorted) 52 | end 53 | return ret 54 | end 55 | 56 | require('console').listen(os.getenv('ADMIN')) 57 | -------------------------------------------------------------------------------- /test/failover/router_2.lua: -------------------------------------------------------------------------------- 1 | router_1.lua -------------------------------------------------------------------------------- /test/failover/router_3.lua: -------------------------------------------------------------------------------- 1 | router_1.lua -------------------------------------------------------------------------------- /test/failover/router_4.lua: 
-------------------------------------------------------------------------------- 1 | router_1.lua -------------------------------------------------------------------------------- /test/failover/suite.ini: -------------------------------------------------------------------------------- 1 | [default] 2 | core = tarantool 3 | description = Failover tests 4 | script = test.lua 5 | is_parallel = False 6 | lua_libs = ../lua_libs/util.lua ../lua_libs/git_util.lua config.lua names.lua 7 | router_1.lua router_2.lua router_3.lua router_4.lua 8 | box_1_a.lua box_1_b.lua box_1_c.lua box_1_d.lua 9 | box_2_a.lua box_2_b.lua box_2_c.lua 10 | box_3_a.lua box_3_b.lua 11 | -------------------------------------------------------------------------------- /test/failover/test.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | 3 | require('strict').on() 4 | 5 | box.cfg{ 6 | listen = os.getenv("LISTEN"), 7 | } 8 | 9 | require('console').listen(os.getenv('ADMIN')) 10 | -------------------------------------------------------------------------------- /test/instances/default.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | local helpers = require('test.luatest_helpers') 3 | 4 | -- 5 | -- Commonly used libraries. Use the 'i' prefix, as in 'instance'. The purpose 6 | -- is to be able to use the libs in server:exec() calls and not get upvalue 7 | -- errors if the same lib is declared in the _test.lua file. 8 | -- 9 | _G.ilt = require('luatest') 10 | 11 | -- For some reason, shutdown hangs on new Tarantools even though the nodes do 12 | -- not seem to have any long requests running. 13 | if box.ctl.set_on_shutdown_timeout then 14 | box.ctl.set_on_shutdown_timeout(0.001) 15 | end 16 | 17 | box.cfg(helpers.box_cfg()) 18 | box.schema.user.grant('guest', 'super', nil, nil, {if_not_exists = true}) 19 | 20 | _G.ready = true 21 | -------------------------------------------------------------------------------- /test/instances/router.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | local helpers = require('test.luatest_helpers') 3 | 4 | -- 5 | -- Commonly used libraries. Use the 'i' prefix, as in 'instance'. The purpose 6 | -- is to be able to use the libs in server:exec() calls and not get upvalue 7 | -- errors if the same lib is declared in the _test.lua file. 8 | -- 9 | _G.ifiber = require('fiber') 10 | _G.ilt = require('luatest') 11 | _G.imsgpack = require('msgpack') 12 | _G.ivtest = require('test.luatest_helpers.vtest') 13 | _G.ivconst = require('vshard.consts') 14 | _G.iverror = require('vshard.error') 15 | _G.iwait_timeout = _G.ivtest.wait_timeout 16 | _G.iyaml = require('yaml') 17 | 18 | -- Do not load the entire vshard module into the global namespace, to catch 19 | -- errors when code relies on that. 20 | _G.vshard = { 21 | router = require('vshard.router'), 22 | } 23 | _G.ivshard = _G.vshard 24 | 25 | -- For some reason, shutdown hangs on new Tarantools even though the nodes do 26 | -- not seem to have any long requests running.
27 | if box.ctl.set_on_shutdown_timeout then 28 | box.ctl.set_on_shutdown_timeout(0.001) 29 | end 30 | 31 | box.cfg(helpers.box_cfg()) 32 | box.schema.user.grant('guest', 'super', nil, nil, {if_not_exists = true}) 33 | 34 | _G.ready = true 35 | -------------------------------------------------------------------------------- /test/lua_libs/git_util.lua: -------------------------------------------------------------------------------- 1 | -- 2 | -- Lua bridge for some of the git commands. 3 | -- 4 | local os = require('os') 5 | 6 | -- 7 | -- Exec a git command. 8 | -- @param cmd Git command to run. 9 | -- @param params Table of parameters: 10 | -- * options - git options. 11 | -- * args - command arguments. 12 | -- * dir - working directory. 13 | -- * fout - write output to the file. 14 | local function exec(cmd, params) 15 | params.options = params.options or '' 16 | params.args = params.args or '' 17 | local shell_cmd = string.format('git %s %s %s', params.options, cmd, 18 | params.args) 19 | if params.fout then 20 | shell_cmd = string.format('%s >%s', shell_cmd, params.fout) 21 | end 22 | if params.dir then 23 | shell_cmd = string.format('cd %s && %s', params.dir, shell_cmd) 24 | end 25 | local res = os.execute(shell_cmd) 26 | assert(res == 0, 'Git cmd error: ' .. res) 27 | end 28 | 29 | local function log_hashes(params) 30 | params.args = "--format='%h' " .. params.args 31 | -- Store log to the file. 32 | local temp_file = os.tmpname() 33 | params.fout = temp_file 34 | exec('log', params) 35 | local lines = {} 36 | for line in io.lines(temp_file) do 37 | table.insert(lines, line) 38 | end 39 | os.remove(temp_file) 40 | return lines 41 | end 42 | 43 | return { 44 | exec = exec, 45 | log_hashes = log_hashes 46 | } 47 | -------------------------------------------------------------------------------- /test/lua_libs/localcfg.lua: -------------------------------------------------------------------------------- 1 | ../../example/localcfg.lua -------------------------------------------------------------------------------- /test/luatest_helpers.lua: -------------------------------------------------------------------------------- 1 | local fun = require('fun') 2 | local json = require('json') 3 | local fio = require('fio') 4 | local log = require('log') 5 | local yaml = require('yaml') 6 | local fiber = require('fiber') 7 | 8 | local luatest_helpers = { 9 | SOCKET_DIR = fio.abspath(os.getenv('VARDIR') or 'test/var') 10 | } 11 | 12 | luatest_helpers.Server = require('test.luatest_helpers.server') 13 | 14 | local function default_cfg() 15 | return { 16 | work_dir = os.getenv('TARANTOOL_WORKDIR'), 17 | listen = os.getenv('TARANTOOL_LISTEN'), 18 | log = ('%s/%s.log'):format(os.getenv('TARANTOOL_WORKDIR'), os.getenv('TARANTOOL_ALIAS')), 19 | } 20 | end 21 | 22 | local function env_cfg() 23 | local src = os.getenv('TARANTOOL_BOX_CFG') 24 | if src == nil then 25 | return {} 26 | end 27 | local res = json.decode(src) 28 | assert(type(res) == 'table') 29 | return res 30 | end 31 | 32 | -- Collect box.cfg table from values passed through 33 | -- luatest_helpers.Server({<...>}) and from the given argument. 34 | -- 35 | -- Use it from inside an instance script. 
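-- For example (a sketch with a hypothetical option; any box.cfg key works):
--   box.cfg(luatest_helpers.box_cfg({memtx_memory = 128 * 1024 * 1024}))
-- merges the defaults, then the TARANTOOL_BOX_CFG overrides, then the given
-- table; on duplicate keys the later source wins, so the explicit argument has
-- the highest priority.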
36 | function luatest_helpers.box_cfg(cfg) 37 | return fun.chain(default_cfg(), env_cfg(), cfg or {}):tomap() 38 | end 39 | 40 | function luatest_helpers.instance_uri(alias, instance_id) 41 | if instance_id == nil then 42 | instance_id = '' 43 | end 44 | instance_id = tostring(instance_id) 45 | return ('%s/%s%s.iproto'):format(luatest_helpers.SOCKET_DIR, alias, instance_id); 46 | end 47 | 48 | function luatest_helpers:get_vclock(server) 49 | return server:eval('return box.info.vclock') 50 | end 51 | 52 | function luatest_helpers:wait_vclock(server, to_vclock) 53 | while true do 54 | local vclock = self:get_vclock(server) 55 | local ok = true 56 | for server_id, to_lsn in pairs(to_vclock) do 57 | local lsn = vclock[server_id] 58 | if lsn == nil or lsn < to_lsn then 59 | ok = false 60 | break 61 | end 62 | end 63 | if ok then 64 | return 65 | end 66 | log.info("wait vclock: %s to %s", yaml.encode(vclock), 67 | yaml.encode(to_vclock)) 68 | fiber.sleep(0.001) 69 | end 70 | end 71 | 72 | return luatest_helpers 73 | -------------------------------------------------------------------------------- /test/luatest_helpers/cluster.lua: -------------------------------------------------------------------------------- 1 | local fio = require('fio') 2 | local Server = require('test.luatest_helpers.server') 3 | 4 | local root = os.environ()['SOURCEDIR'] or '.' 5 | 6 | local Cluster = {} 7 | 8 | function Cluster:new(object) 9 | self:inherit(object) 10 | object:initialize() 11 | self.servers = object.servers 12 | self.built_servers = object.built_servers 13 | return object 14 | end 15 | 16 | function Cluster:inherit(object) 17 | object = object or {} 18 | setmetatable(object, self) 19 | self.__index = self 20 | self.servers = {} 21 | self.built_servers = {} 22 | return object 23 | end 24 | 25 | function Cluster:initialize() 26 | self.servers = {} 27 | end 28 | 29 | function Cluster:server(alias) 30 | for _, server in ipairs(self.servers) do 31 | if server.alias == alias then 32 | return server 33 | end 34 | end 35 | return nil 36 | end 37 | 38 | function Cluster:drop() 39 | for _, server in ipairs(self.servers) do 40 | if server ~= nil then 41 | server:stop() 42 | server:cleanup() 43 | end 44 | end 45 | end 46 | 47 | function Cluster:get_index(server) 48 | local index = nil 49 | for i, v in ipairs(self.servers) do 50 | if (v.id == server) then 51 | index = i 52 | end 53 | end 54 | return index 55 | end 56 | 57 | function Cluster:delete_server(server) 58 | local idx = self:get_index(server) 59 | if idx == nil then 60 | print("Key does not exist") 61 | else 62 | table.remove(self.servers, idx) 63 | end 64 | end 65 | 66 | function Cluster:stop() 67 | for _, server in ipairs(self.servers) do 68 | if server ~= nil then 69 | server:stop() 70 | end 71 | end 72 | end 73 | 74 | function Cluster:start(opts) 75 | for _, server in ipairs(self.servers) do 76 | if not server.process then 77 | server:start({wait_for_readiness = false}) 78 | end 79 | end 80 | 81 | -- The option is true by default. 
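-- For example, cluster:start({wait_for_readiness = false}) only issues the
-- start commands and returns without blocking until the servers report
-- readiness (a sketch; 'cluster' stands for any Cluster instance built via
-- Cluster:new()).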
82 | local wait_for_readiness = true 83 | if opts ~= nil and opts.wait_for_readiness ~= nil then 84 | wait_for_readiness = opts.wait_for_readiness 85 | end 86 | 87 | if wait_for_readiness then 88 | for _, server in ipairs(self.servers) do 89 | server:wait_for_readiness() 90 | end 91 | end 92 | end 93 | 94 | function Cluster:build_server(server_config, instance_file) 95 | instance_file = instance_file or 'default.lua' 96 | server_config = table.deepcopy(server_config) 97 | server_config.command = fio.pathjoin(root, 'test/instances/', instance_file) 98 | assert(server_config.alias, 'Either replicaset.alias or server.alias must be given') 99 | local server = Server:new(server_config) 100 | table.insert(self.built_servers, server) 101 | return server 102 | end 103 | 104 | function Cluster:add_server(server) 105 | if self:server(server.alias) ~= nil then 106 | error('Server with alias ' .. server.alias .. ' already exists') 107 | end 108 | table.insert(self.servers, server) 109 | end 110 | 111 | function Cluster:build_and_add_server(config, replicaset_config, engine) 112 | local server = self:build_server(config, replicaset_config, engine) 113 | self:add_server(server) 114 | return server 115 | end 116 | 117 | 118 | function Cluster:get_leader() 119 | for _, instance in ipairs(self.servers) do 120 | if instance:eval('return box.info.ro') == false then 121 | return instance 122 | end 123 | end 124 | end 125 | 126 | function Cluster:exec_on_leader(bootstrap_function) 127 | local leader = self:get_leader() 128 | return leader:exec(bootstrap_function) 129 | end 130 | 131 | 132 | return Cluster 133 | -------------------------------------------------------------------------------- /test/misc/bad_uuid_1_a.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | 3 | local fio = require('fio') 4 | local NAME = fio.basename(arg[0], '.lua') 5 | 6 | test_run = require('test_run').new() 7 | require('console').listen(os.getenv('ADMIN')) 8 | 9 | bad_uuid_config = require('bad_uuid_config') 10 | shard_cfg = bad_uuid_config.cfg 11 | replicaset_uuid = bad_uuid_config.replicaset_uuid 12 | name_to_uuid = bad_uuid_config.name_to_uuid 13 | vshard = require('vshard') 14 | 15 | if NAME == 'bad_uuid_2_a' then 16 | local rs2 = shard_cfg.sharding[replicaset_uuid[2]] 17 | local uuid = name_to_uuid.bad_uuid_2_a 18 | local bad_uuid_2_a = rs2.replicas[uuid] 19 | rs2.replicas[uuid] = nil 20 | -- Change UUID on a single server. Other replicas do not 21 | -- see this change.
22 | uuid = '2d92ae8a-afc0-4e91-ba34-843a356b8ed7' 23 | rs2.replicas[uuid] = bad_uuid_2_a 24 | name_to_uuid.bad_uuid_2_a = uuid 25 | end 26 | if NAME == 'bad_uuid_2_a_repaired' then 27 | NAME = 'bad_uuid_2_a' 28 | end 29 | vshard.storage.cfg(shard_cfg, name_to_uuid[NAME]) 30 | -------------------------------------------------------------------------------- /test/misc/bad_uuid_1_b.lua: -------------------------------------------------------------------------------- 1 | bad_uuid_1_a.lua -------------------------------------------------------------------------------- /test/misc/bad_uuid_2_a.lua: -------------------------------------------------------------------------------- 1 | bad_uuid_1_a.lua -------------------------------------------------------------------------------- /test/misc/bad_uuid_2_a_repaired.lua: -------------------------------------------------------------------------------- 1 | bad_uuid_1_a.lua -------------------------------------------------------------------------------- /test/misc/bad_uuid_2_b.lua: -------------------------------------------------------------------------------- 1 | bad_uuid_1_a.lua -------------------------------------------------------------------------------- /test/misc/bad_uuid_config.lua: -------------------------------------------------------------------------------- 1 | local replicaset_uuid = {'cbf06940-0790-498b-948d-042b62cf3d29', 2 | 'ac522f65-aa94-4134-9f64-51ee384f1a54'} 3 | 4 | local name_to_uuid = { 5 | bad_uuid_1_a = '8a274925-a26d-47fc-9e1b-af88ce939412', 6 | bad_uuid_1_b = '3de2e3e1-9ebe-4d0d-abb1-26d301b84633', 7 | bad_uuid_2_a = '1e02ae8a-afc0-4e91-ba34-843a356b8ed7', 8 | bad_uuid_2_b = '001688c3-66f8-4a31-8e19-036c17d489c2', 9 | } 10 | 11 | local shard_cfg = { 12 | sharding = { 13 | [replicaset_uuid[1]] = { 14 | replicas = { 15 | [name_to_uuid.bad_uuid_1_a] = { 16 | uri = 'storage:storage@127.0.0.1:3301', 17 | name = 'bad_uuid_1_a', 18 | master = true 19 | }, 20 | [name_to_uuid.bad_uuid_1_b] = { 21 | uri = 'storage:storage@127.0.0.1:3302', 22 | name = 'bad_uuid_1_b' 23 | } 24 | } 25 | }, 26 | [replicaset_uuid[2]] = { 27 | replicas = { 28 | [name_to_uuid.bad_uuid_2_a] = { 29 | uri = 'storage:storage@127.0.0.1:3303', 30 | name = 'bad_uuid_2_a', 31 | master = true 32 | }, 33 | [name_to_uuid.bad_uuid_2_b] = { 34 | uri = 'storage:storage@127.0.0.1:3304', 35 | name = 'bad_uuid_2_b' 36 | } 37 | } 38 | }, 39 | } 40 | } 41 | 42 | return {cfg = shard_cfg, replicaset_uuid = replicaset_uuid, 43 | name_to_uuid = name_to_uuid, replication_connect_quorum = 0} 44 | -------------------------------------------------------------------------------- /test/misc/bad_uuid_router.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | 3 | test_run = require('test_run').new() 4 | require('console').listen(os.getenv('ADMIN')) 5 | 6 | -- Call a configuration provider 7 | cfg = require('bad_uuid_config').cfg 8 | 9 | -- Start the database with sharding 10 | vshard = require('vshard') 11 | util = require('util') 12 | vshard.router.cfg(cfg) 13 | box.cfg{} 14 | -------------------------------------------------------------------------------- /test/misc/fullmesh.result: -------------------------------------------------------------------------------- 1 | test_run = require('test_run').new() 2 | --- 3 | ... 4 | REPLICASET_1 = { 'storage_1_a', 'storage_1_b' } 5 | --- 6 | ... 7 | REPLICASET_2 = { 'storage_2_a', 'storage_2_b' } 8 | --- 9 | ... 10 | test_run:create_cluster(REPLICASET_1, 'misc') 11 | --- 12 | ... 
13 | test_run:create_cluster(REPLICASET_2, 'misc') 14 | --- 15 | ... 16 | util = require('util') 17 | --- 18 | ... 19 | test_run:wait_fullmesh(REPLICASET_1) 20 | --- 21 | ... 22 | test_run:wait_fullmesh(REPLICASET_2) 23 | --- 24 | ... 25 | util.map_evals(test_run, {REPLICASET_1, REPLICASET_2}, 'bootstrap_storage(\'memtx\')') 26 | --- 27 | ... 28 | -- 29 | -- gh-83: use fullmesh topology in vshard. Scenario of the test: 30 | -- start writing a tuple on a master. Then switch the master. After 31 | -- the switch the tuple should still be sent from the slave to the 32 | -- new master. 33 | -- 34 | _ = test_run:switch('storage_1_a') 35 | --- 36 | ... 37 | -- Block sending of new requests. 38 | box.error.injection.set("ERRINJ_WAL_DELAY", true) 39 | --- 40 | - ok 41 | ... 42 | f = fiber.create(function() box.space.test:replace{1, 1} end) 43 | --- 44 | ... 45 | box.space.test:select{} 46 | --- 47 | - - [1, 1] 48 | ... 49 | cfg.replication_connect_quorum = 0 50 | --- 51 | ... 52 | cfg.sharding[util.replicasets[1]].replicas[util.name_to_uuid.storage_1_b].master = true 53 | --- 54 | ... 55 | cfg.sharding[util.replicasets[1]].replicas[util.name_to_uuid.storage_1_a].master = false 56 | --- 57 | ... 58 | vshard.storage.cfg(cfg, util.name_to_uuid.storage_1_a) 59 | --- 60 | ... 61 | _ = test_run:switch('storage_1_b') 62 | --- 63 | ... 64 | cfg.replication_connect_quorum = 0 65 | --- 66 | ... 67 | cfg.sharding[util.replicasets[1]].replicas[util.name_to_uuid.storage_1_b].master = true 68 | --- 69 | ... 70 | cfg.sharding[util.replicasets[1]].replicas[util.name_to_uuid.storage_1_a].master = false 71 | --- 72 | ... 73 | vshard.storage.cfg(cfg, util.name_to_uuid.storage_1_b) 74 | --- 75 | ... 76 | box.space.test:select{} 77 | --- 78 | - [] 79 | ... 80 | _ = test_run:switch('storage_1_a') 81 | --- 82 | ... 83 | box.error.injection.set("ERRINJ_WAL_DELAY", false) 84 | --- 85 | - ok 86 | ... 87 | _ = test_run:switch('storage_1_b') 88 | --- 89 | ... 90 | while box.space.test:count() == 0 do fiber.sleep(0.1) end 91 | --- 92 | ... 93 | box.space.test:select{} 94 | --- 95 | - - [1, 1] 96 | ... 97 | _ = test_run:cmd("switch default") 98 | --- 99 | ... 100 | test_run:drop_cluster(REPLICASET_2) 101 | --- 102 | ... 103 | test_run:drop_cluster(REPLICASET_1) 104 | --- 105 | ... 106 | -------------------------------------------------------------------------------- /test/misc/fullmesh.test.lua: -------------------------------------------------------------------------------- 1 | test_run = require('test_run').new() 2 | REPLICASET_1 = { 'storage_1_a', 'storage_1_b' } 3 | REPLICASET_2 = { 'storage_2_a', 'storage_2_b' } 4 | test_run:create_cluster(REPLICASET_1, 'misc') 5 | test_run:create_cluster(REPLICASET_2, 'misc') 6 | util = require('util') 7 | test_run:wait_fullmesh(REPLICASET_1) 8 | test_run:wait_fullmesh(REPLICASET_2) 9 | util.map_evals(test_run, {REPLICASET_1, REPLICASET_2}, 'bootstrap_storage(\'memtx\')') 10 | 11 | -- 12 | -- gh-83: use fullmesh topology in vshard. Scenario of the test: 13 | -- start writing a tuple on a master. Then switch the master. After 14 | -- the switch the tuple should still be sent from the slave to the 15 | -- new master. 16 | -- 17 | 18 | _ = test_run:switch('storage_1_a') 19 | -- Block sending of new requests.
20 | box.error.injection.set("ERRINJ_WAL_DELAY", true) 21 | f = fiber.create(function() box.space.test:replace{1, 1} end) 22 | box.space.test:select{} 23 | cfg.replication_connect_quorum = 0 24 | cfg.sharding[util.replicasets[1]].replicas[util.name_to_uuid.storage_1_b].master = true 25 | cfg.sharding[util.replicasets[1]].replicas[util.name_to_uuid.storage_1_a].master = false 26 | vshard.storage.cfg(cfg, util.name_to_uuid.storage_1_a) 27 | _ = test_run:switch('storage_1_b') 28 | cfg.replication_connect_quorum = 0 29 | cfg.sharding[util.replicasets[1]].replicas[util.name_to_uuid.storage_1_b].master = true 30 | cfg.sharding[util.replicasets[1]].replicas[util.name_to_uuid.storage_1_a].master = false 31 | vshard.storage.cfg(cfg, util.name_to_uuid.storage_1_b) 32 | box.space.test:select{} 33 | _ = test_run:switch('storage_1_a') 34 | box.error.injection.set("ERRINJ_WAL_DELAY", false) 35 | _ = test_run:switch('storage_1_b') 36 | while box.space.test:count() == 0 do fiber.sleep(0.1) end 37 | box.space.test:select{} 38 | 39 | _ = test_run:cmd("switch default") 40 | test_run:drop_cluster(REPLICASET_2) 41 | test_run:drop_cluster(REPLICASET_1) 42 | -------------------------------------------------------------------------------- /test/misc/master_switch.result: -------------------------------------------------------------------------------- 1 | test_run = require('test_run').new() 2 | --- 3 | ... 4 | REPLICASET_1 = { 'storage_1_a', 'storage_1_b' } 5 | --- 6 | ... 7 | REPLICASET_2 = { 'storage_2_a', 'storage_2_b' } 8 | --- 9 | ... 10 | test_run:create_cluster(REPLICASET_1, 'misc') 11 | --- 12 | ... 13 | test_run:create_cluster(REPLICASET_2, 'misc') 14 | --- 15 | ... 16 | util = require('util') 17 | --- 18 | ... 19 | util.wait_master(test_run, REPLICASET_1, 'storage_1_a') 20 | --- 21 | ... 22 | util.wait_master(test_run, REPLICASET_2, 'storage_2_a') 23 | --- 24 | ... 25 | util.map_evals(test_run, {REPLICASET_1, REPLICASET_2}, 'bootstrap_storage(\'memtx\')') 26 | --- 27 | ... 28 | _ = test_run:cmd('stop server storage_1_b') 29 | --- 30 | ... 31 | _ = test_run:switch('storage_1_a') 32 | --- 33 | ... 34 | vshard.storage.bucket_force_create(1) 35 | --- 36 | - true 37 | ... 38 | box.space.test:insert{1, 1, 1} 39 | --- 40 | - [1, 1, 1] 41 | ... 42 | _ = test_run:switch('default') 43 | --- 44 | ... 45 | _ = test_run:cmd('stop server storage_1_a') 46 | --- 47 | ... 48 | _ = test_run:cmd('start server storage_1_b') 49 | --- 50 | ... 51 | _ = test_run:switch('storage_1_b') 52 | --- 53 | ... 54 | cfg.sharding[util.replicasets[1]].replicas[util.name_to_uuid.storage_1_b].master = true 55 | --- 56 | ... 57 | cfg.sharding[util.replicasets[1]].replicas[util.name_to_uuid.storage_1_a].master = false 58 | --- 59 | ... 60 | cfg.replication_connect_quorum = 1 61 | --- 62 | ... 63 | vshard.storage.cfg(cfg, util.name_to_uuid.storage_1_b) 64 | --- 65 | ... 66 | vshard.storage.bucket_force_create(1) 67 | --- 68 | - true 69 | ... 70 | box.space.test:insert{1, 1, 2} 71 | --- 72 | - [1, 1, 2] 73 | ... 74 | -- 75 | -- Test that the replication is broken - one insert must be newer 76 | -- than the other, but here the replication stops. This situation 77 | -- occurs when a master goes down, is replaced by another master, 78 | -- and then becomes master again. 79 | -- 80 | _ = test_run:cmd('start server storage_1_a') 81 | --- 82 | ... 83 | _ = test_run:switch('storage_1_a') 84 | --- 85 | ... 86 | cfg.sharding[util.replicasets[1]].replicas[util.name_to_uuid.storage_1_b].master = true 87 | --- 88 | ...
89 | cfg.sharding[util.replicasets[1]].replicas[util.name_to_uuid.storage_1_a].master = false 90 | --- 91 | ... 92 | vshard.storage.cfg(cfg, util.name_to_uuid.storage_1_a) 93 | --- 94 | ... 95 | box.space.test:get{1} 96 | --- 97 | - [1, 1, 1] 98 | ... 99 | while not test_run:grep_log('storage_1_a', 'error applying row') do fiber.sleep(0.1) end 100 | --- 101 | ... 102 | _ = test_run:switch('storage_1_b') 103 | --- 104 | ... 105 | box.space.test:get{1} 106 | --- 107 | - [1, 1, 2] 108 | ... 109 | _ = test_run:cmd("switch default") 110 | --- 111 | ... 112 | test_run:drop_cluster(REPLICASET_2) 113 | --- 114 | ... 115 | test_run:drop_cluster(REPLICASET_1) 116 | --- 117 | ... 118 | -------------------------------------------------------------------------------- /test/misc/master_switch.test.lua: -------------------------------------------------------------------------------- 1 | test_run = require('test_run').new() 2 | REPLICASET_1 = { 'storage_1_a', 'storage_1_b' } 3 | REPLICASET_2 = { 'storage_2_a', 'storage_2_b' } 4 | test_run:create_cluster(REPLICASET_1, 'misc') 5 | test_run:create_cluster(REPLICASET_2, 'misc') 6 | util = require('util') 7 | util.wait_master(test_run, REPLICASET_1, 'storage_1_a') 8 | util.wait_master(test_run, REPLICASET_2, 'storage_2_a') 9 | util.map_evals(test_run, {REPLICASET_1, REPLICASET_2}, 'bootstrap_storage(\'memtx\')') 10 | 11 | _ = test_run:cmd('stop server storage_1_b') 12 | _ = test_run:switch('storage_1_a') 13 | vshard.storage.bucket_force_create(1) 14 | box.space.test:insert{1, 1, 1} 15 | 16 | _ = test_run:switch('default') 17 | _ = test_run:cmd('stop server storage_1_a') 18 | _ = test_run:cmd('start server storage_1_b') 19 | _ = test_run:switch('storage_1_b') 20 | cfg.sharding[util.replicasets[1]].replicas[util.name_to_uuid.storage_1_b].master = true 21 | cfg.sharding[util.replicasets[1]].replicas[util.name_to_uuid.storage_1_a].master = false 22 | cfg.replication_connect_quorum = 1 23 | vshard.storage.cfg(cfg, util.name_to_uuid.storage_1_b) 24 | vshard.storage.bucket_force_create(1) 25 | box.space.test:insert{1, 1, 2} 26 | 27 | -- 28 | -- Test that the replication is broken - one insert must be newer 29 | -- than the other, but here the replication stops. This situation 30 | -- occurs when a master goes down, is replaced by another master, 31 | -- and then becomes master again.
32 | -- 33 | _ = test_run:cmd('start server storage_1_a') 34 | _ = test_run:switch('storage_1_a') 35 | cfg.sharding[util.replicasets[1]].replicas[util.name_to_uuid.storage_1_b].master = true 36 | cfg.sharding[util.replicasets[1]].replicas[util.name_to_uuid.storage_1_a].master = false 37 | vshard.storage.cfg(cfg, util.name_to_uuid.storage_1_a) 38 | box.space.test:get{1} 39 | while not test_run:grep_log('storage_1_a', 'error applying row') do fiber.sleep(0.1) end 40 | 41 | _ = test_run:switch('storage_1_b') 42 | box.space.test:get{1} 43 | 44 | _ = test_run:cmd("switch default") 45 | test_run:drop_cluster(REPLICASET_2) 46 | test_run:drop_cluster(REPLICASET_1) 47 | -------------------------------------------------------------------------------- /test/misc/router_1.lua: -------------------------------------------------------------------------------- 1 | ../../example/router_1.lua -------------------------------------------------------------------------------- /test/misc/storage_1_a.lua: -------------------------------------------------------------------------------- 1 | ../lua_libs/storage_template.lua -------------------------------------------------------------------------------- /test/misc/storage_1_b.lua: -------------------------------------------------------------------------------- 1 | ../lua_libs/storage_template.lua -------------------------------------------------------------------------------- /test/misc/storage_2_a.lua: -------------------------------------------------------------------------------- 1 | ../lua_libs/storage_template.lua -------------------------------------------------------------------------------- /test/misc/storage_2_b.lua: -------------------------------------------------------------------------------- 1 | ../lua_libs/storage_template.lua -------------------------------------------------------------------------------- /test/misc/storage_3_a.lua: -------------------------------------------------------------------------------- 1 | test_run = require('test_run').new() 2 | require('console').listen(os.getenv('ADMIN')) 3 | 4 | local fio = require('fio') 5 | local NAME = fio.basename(arg[0], '.lua') 6 | 7 | -- Call a configuration provider 8 | cfg = dofile('localcfg.lua') 9 | cfg.sharding['cbf06940-0790-498b-948d-042b62cf3d29'].replicas['3de2e3e1-9ebe-4d0d-abb1-26d301b84633'] = nil 10 | cfg.sharding['ac522f65-aa94-4134-9f64-51ee384f1a54'].replicas['1e02ae8a-afc0-4e91-ba34-843a356b8ed7'].master = nil 11 | cfg.sharding['ac522f65-aa94-4134-9f64-51ee384f1a54'].replicas['001688c3-66f8-4a31-8e19-036c17d489c2'].master = true 12 | cfg.sharding['910ee49b-2540-41b6-9b8c-c976bef1bb17'] = {replicas = {['ee34807e-be5c-4ae3-8348-e97be227a305'] = {uri = "storage:storage@127.0.0.1:3306", name = 'storage_3_a', master = true}}} 13 | 14 | -- Start the database with sharding 15 | vshard = require('vshard') 16 | vshard.storage.cfg(cfg, 'ee34807e-be5c-4ae3-8348-e97be227a305') 17 | -------------------------------------------------------------------------------- /test/misc/suite.ini: -------------------------------------------------------------------------------- 1 | [default] 2 | core = tarantool 3 | description = Misc tests 4 | script = test.lua 5 | is_parallel = False 6 | release_disabled = fullmesh.test.lua 7 | lua_libs = ../lua_libs/util.lua ../lua_libs/git_util.lua 8 | bad_uuid_config.lua ../../example/localcfg.lua 9 | -------------------------------------------------------------------------------- /test/misc/test.lua: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | 3 | require('strict').on() 4 | 5 | box.cfg{ 6 | listen = os.getenv("LISTEN"), 7 | } 8 | 9 | require('console').listen(os.getenv('ADMIN')) 10 | -------------------------------------------------------------------------------- /test/multiple_routers/configs.lua: -------------------------------------------------------------------------------- 1 | util = require('util') 2 | 3 | rs_1_1 = util.replicasets[1] 4 | rs_1_2 = util.replicasets[2] 5 | rs_2_1 = util.replicasets[3] 6 | rs_2_2 = util.replicasets[4] 7 | 8 | local cfg_1 = {replication_connect_quorum = 0} 9 | cfg_1.sharding = { 10 | [rs_1_1] = { 11 | replicas = { 12 | [util.name_to_uuid.storage_1_1_a] = { 13 | uri = 'storage:storage@127.0.0.1:3301', 14 | name = 'storage_1_1_a', 15 | master = true, 16 | }, 17 | [util.name_to_uuid.storage_1_1_b] = { 18 | uri = 'storage:storage@127.0.0.1:3302', 19 | name = 'storage_1_1_b', 20 | }, 21 | } 22 | }, 23 | [rs_1_2] = { 24 | replicas = { 25 | [util.name_to_uuid.storage_1_2_a] = { 26 | uri = 'storage:storage@127.0.0.1:3303', 27 | name = 'storage_1_2_a', 28 | master = true, 29 | }, 30 | [util.name_to_uuid.storage_1_2_b] = { 31 | uri = 'storage:storage@127.0.0.1:3304', 32 | name = 'storage_1_2_b', 33 | }, 34 | } 35 | }, 36 | } 37 | 38 | 39 | local cfg_2 = {replication_connect_quorum = 0} 40 | cfg_2.sharding = { 41 | [rs_2_1] = { 42 | replicas = { 43 | [util.name_to_uuid.storage_2_1_a] = { 44 | uri = 'storage:storage@127.0.0.1:3305', 45 | name = 'storage_2_1_a', 46 | master = true, 47 | }, 48 | [util.name_to_uuid.storage_2_1_b] = { 49 | uri = 'storage:storage@127.0.0.1:3306', 50 | name = 'storage_2_1_b', 51 | }, 52 | } 53 | }, 54 | [rs_2_2] = { 55 | replicas = { 56 | [util.name_to_uuid.storage_2_2_a] = { 57 | uri = 'storage:storage@127.0.0.1:3307', 58 | name = 'storage_2_2_a', 59 | master = true, 60 | }, 61 | [util.name_to_uuid.storage_2_2_b] = { 62 | uri = 'storage:storage@127.0.0.1:3308', 63 | name = 'storage_2_2_b', 64 | }, 65 | } 66 | }, 67 | } 68 | 69 | return { 70 | cfg_1 = cfg_1, 71 | cfg_2 = cfg_2, 72 | } 73 | -------------------------------------------------------------------------------- /test/multiple_routers/router_1.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | 3 | require('strict').on() 4 | 5 | -- Get instance name 6 | local fio = require('fio') 7 | local NAME = fio.basename(arg[0], '.lua') 8 | 9 | require('console').listen(os.getenv('ADMIN')) 10 | 11 | configs = dofile('configs.lua') 12 | 13 | -- Start the database with sharding 14 | vshard = require('vshard') 15 | box.cfg{} 16 | -------------------------------------------------------------------------------- /test/multiple_routers/storage_1_1_a.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | 3 | -- Get instance name. 4 | NAME = require('fio').basename(arg[0], '.lua') 5 | 6 | -- Fetch config for the cluster of the instance. 
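-- The instance names follow the 'storage_<cluster>_<replicaset>_<letter>'
-- pattern, so character 9 of the name is the cluster number: it is '1' for
-- 'storage_1_1_a' and '2' for 'storage_2_2_b'.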
7 | if NAME:sub(9,9) == '1' then 8 | cfg = dofile('configs.lua').cfg_1 9 | else 10 | cfg = dofile('configs.lua').cfg_2 11 | end 12 | require('storage_template') 13 | -------------------------------------------------------------------------------- /test/multiple_routers/storage_1_1_b.lua: -------------------------------------------------------------------------------- 1 | storage_1_1_a.lua -------------------------------------------------------------------------------- /test/multiple_routers/storage_1_2_a.lua: -------------------------------------------------------------------------------- 1 | storage_1_1_a.lua -------------------------------------------------------------------------------- /test/multiple_routers/storage_1_2_b.lua: -------------------------------------------------------------------------------- 1 | storage_1_1_a.lua -------------------------------------------------------------------------------- /test/multiple_routers/storage_2_1_a.lua: -------------------------------------------------------------------------------- 1 | storage_1_1_a.lua -------------------------------------------------------------------------------- /test/multiple_routers/storage_2_1_b.lua: -------------------------------------------------------------------------------- 1 | storage_1_1_a.lua -------------------------------------------------------------------------------- /test/multiple_routers/storage_2_2_a.lua: -------------------------------------------------------------------------------- 1 | storage_1_1_a.lua -------------------------------------------------------------------------------- /test/multiple_routers/storage_2_2_b.lua: -------------------------------------------------------------------------------- 1 | storage_1_1_a.lua -------------------------------------------------------------------------------- /test/multiple_routers/suite.ini: -------------------------------------------------------------------------------- 1 | [default] 2 | core = tarantool 3 | description = Multiple routers tests 4 | script = test.lua 5 | is_parallel = False 6 | lua_libs = ../lua_libs/util.lua ../lua_libs/git_util.lua 7 | ../lua_libs/storage_template.lua configs.lua 8 | -------------------------------------------------------------------------------- /test/multiple_routers/test.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | 3 | require('strict').on() 4 | 5 | box.cfg{ 6 | listen = os.getenv("LISTEN"), 7 | } 8 | 9 | require('console').listen(os.getenv('ADMIN')) 10 | -------------------------------------------------------------------------------- /test/rebalancer/box_1_a.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | NAME = require('fio').basename(arg[0], '.lua') 3 | cfg = dofile('config.lua') 4 | util = require('util') 5 | if NAME == 'box_3_a' or NAME == 'box_3_b' or 6 | NAME == 'box_4_a' or NAME == 'box_4_b' or 7 | string.match(NAME, 'fullbox') then 8 | add_replicaset() 9 | end 10 | if NAME == 'box_4_a' or NAME == 'box_4_b' or 11 | string.match(NAME, 'fullbox') then 12 | add_second_replicaset() 13 | end 14 | 15 | function switch_rs1_master() 16 | cfg.sharding[util.replicasets[1]].replicas[util.name_to_uuid.box_1_a].master = nil 17 | cfg.sharding[util.replicasets[1]].replicas[util.name_to_uuid.box_1_b].master = true 18 | end 19 | 20 | function nullify_rs_weight() 21 | cfg.sharding[util.replicasets[1]].weight = 0 22 | end 23 | 24 | function create_simple_space(...) 
25 | local s = box.schema.create_space(...) 26 | s:create_index('pk') 27 | s:create_index(cfg.shard_index or 'bucket_id', 28 | {parts = {{2, 'unsigned'}}, unique = false}) 29 | end 30 | 31 | finish_refs = false 32 | function make_ref() while not finish_refs do fiber.sleep(0.01) end end 33 | 34 | if NAME ~= 'box_1_a' and NAME ~= 'box_1_b' then 35 | CHANGE_SPACE_IDS = true 36 | end 37 | require('storage_template') 38 | -------------------------------------------------------------------------------- /test/rebalancer/box_1_b.lua: -------------------------------------------------------------------------------- 1 | box_1_a.lua -------------------------------------------------------------------------------- /test/rebalancer/box_2_a.lua: -------------------------------------------------------------------------------- 1 | box_1_a.lua -------------------------------------------------------------------------------- /test/rebalancer/box_2_b.lua: -------------------------------------------------------------------------------- 1 | box_1_a.lua -------------------------------------------------------------------------------- /test/rebalancer/box_3_a.lua: -------------------------------------------------------------------------------- 1 | box_1_a.lua -------------------------------------------------------------------------------- /test/rebalancer/box_3_b.lua: -------------------------------------------------------------------------------- 1 | box_1_a.lua -------------------------------------------------------------------------------- /test/rebalancer/box_4_a.lua: -------------------------------------------------------------------------------- 1 | box_1_a.lua -------------------------------------------------------------------------------- /test/rebalancer/box_4_b.lua: -------------------------------------------------------------------------------- 1 | box_1_a.lua -------------------------------------------------------------------------------- /test/rebalancer/config.lua: -------------------------------------------------------------------------------- 1 | local util = require('util') 2 | 3 | sharding = { 4 | [util.replicasets[1]] = { 5 | replicas = { 6 | [util.name_to_uuid.box_1_a] = { 7 | uri = 'storage:storage@127.0.0.1:3301', 8 | name = 'box_1_a', 9 | master = true, 10 | }, 11 | [util.name_to_uuid.box_1_b] = { 12 | uri = 'storage:storage@127.0.0.1:3302', 13 | name = 'box_1_b', 14 | } 15 | } 16 | }, 17 | [util.replicasets[2]] = { 18 | replicas = { 19 | [util.name_to_uuid.box_2_a] = { 20 | uri = 'storage:storage@127.0.0.1:3303', 21 | name = 'box_2_a', 22 | master = true, 23 | }, 24 | [util.name_to_uuid.box_2_b] = { 25 | uri = 'storage:storage@127.0.0.1:3304', 26 | name = 'box_2_b', 27 | } 28 | } 29 | } 30 | } 31 | 32 | function add_replicaset() 33 | sharding[util.replicasets[3]] = { 34 | replicas = { 35 | [util.name_to_uuid.box_3_a] = { 36 | uri = 'storage:storage@127.0.0.1:3305', 37 | name = 'box_3_a', 38 | master = true 39 | }, 40 | [util.name_to_uuid.box_3_b] = { 41 | uri = 'storage:storage@127.0.0.1:3306', 42 | name = 'box_3_b', 43 | } 44 | } 45 | } 46 | end 47 | 48 | function add_second_replicaset() 49 | sharding[util.replicasets[4]] = { 50 | replicas = { 51 | [util.name_to_uuid.box_4_a] = { 52 | uri = 'storage:storage@127.0.0.1:3307', 53 | name = 'box_4_a', 54 | master = true 55 | }, 56 | [util.name_to_uuid.box_4_b] = { 57 | uri = 'storage:storage@127.0.0.1:3308', 58 | name = 'box_4_b', 59 | } 60 | } 61 | } 62 | end 63 | 64 | function remove_replicaset_first_stage() 65 | 
sharding[util.replicasets[3]].weight = 0 66 | end 67 | 68 | function remove_replicaset_second_stage() 69 | sharding[util.replicasets[3]] = nil 70 | end 71 | 72 | function remove_second_replicaset_first_stage() 73 | sharding[util.replicasets[4]].weight = 0 74 | end 75 | 76 | return { 77 | -- Use a small number of buckets to speed up tests. 78 | bucket_count = 200, 79 | sharding = sharding, 80 | rebalancer_disbalance_threshold = 0.01, 81 | rebalancer_max_sending = 5, 82 | shard_index = 'vbucket', 83 | replication_connect_quorum = 0, 84 | replication_connect_timeout = 0.01, 85 | } 86 | -------------------------------------------------------------------------------- /test/rebalancer/engine.cfg: -------------------------------------------------------------------------------- 1 | { 2 | "rebalancer.test.lua": { 3 | "memtx": {"engine": "memtx"}, 4 | "vinyl": {"engine": "vinyl"} 5 | }, 6 | "restart_during_rebalancing.test.lua": { 7 | "memtx": {"engine": "memtx"}, 8 | "vinyl": {"engine": "vinyl"} 9 | }, 10 | "stress_add_remove_rs.test.lua": { 11 | "memtx": {"engine": "memtx"}, 12 | "vinyl": {"engine": "vinyl"} 13 | }, 14 | "stress_add_remove_several_rs.test.lua": { 15 | "memtx": {"engine": "memtx"}, 16 | "vinyl": {"engine": "vinyl"} 17 | }, 18 | "parallel.test.lua": { 19 | "memtx": {"engine": "memtx"}, 20 | "vinyl": {"engine": "vinyl"} 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /test/rebalancer/fullbox_1_a.lua: -------------------------------------------------------------------------------- 1 | box_1_a.lua -------------------------------------------------------------------------------- /test/rebalancer/fullbox_1_b.lua: -------------------------------------------------------------------------------- 1 | box_1_a.lua -------------------------------------------------------------------------------- /test/rebalancer/fullbox_2_a.lua: -------------------------------------------------------------------------------- 1 | box_1_a.lua -------------------------------------------------------------------------------- /test/rebalancer/fullbox_2_b.lua: -------------------------------------------------------------------------------- 1 | box_1_a.lua -------------------------------------------------------------------------------- /test/rebalancer/fullbox_3_a.lua: -------------------------------------------------------------------------------- 1 | box_1_a.lua -------------------------------------------------------------------------------- /test/rebalancer/fullbox_3_b.lua: -------------------------------------------------------------------------------- 1 | box_1_a.lua -------------------------------------------------------------------------------- /test/rebalancer/fullbox_4_a.lua: -------------------------------------------------------------------------------- 1 | box_1_a.lua -------------------------------------------------------------------------------- /test/rebalancer/fullbox_4_b.lua: -------------------------------------------------------------------------------- 1 | box_1_a.lua -------------------------------------------------------------------------------- /test/rebalancer/parallel.result: -------------------------------------------------------------------------------- 1 | -- test-run result file version 2 2 | test_run = require('test_run').new() 3 | | --- 4 | | ... 5 | 6 | REPLICASET_1 = { 'box_1_a', 'box_1_b' } 7 | | --- 8 | | ... 9 | REPLICASET_2 = { 'box_2_a', 'box_2_b' } 10 | | --- 11 | | ...
12 | REPLICASET_3 = { 'box_3_a', 'box_3_b' } 13 | | --- 14 | | ... 15 | REPLICASET_4 = { 'box_4_a', 'box_4_b' } 16 | | --- 17 | | ... 18 | engine = test_run:get_cfg('engine') 19 | | --- 20 | | ... 21 | 22 | test_run:create_cluster(REPLICASET_1, 'rebalancer') 23 | | --- 24 | | ... 25 | test_run:create_cluster(REPLICASET_2, 'rebalancer') 26 | | --- 27 | | ... 28 | test_run:create_cluster(REPLICASET_3, 'rebalancer') 29 | | --- 30 | | ... 31 | test_run:create_cluster(REPLICASET_4, 'rebalancer') 32 | | --- 33 | | ... 34 | util = require('util') 35 | | --- 36 | | ... 37 | util.wait_master(test_run, REPLICASET_1, 'box_1_a') 38 | | --- 39 | | ... 40 | util.wait_master(test_run, REPLICASET_2, 'box_2_a') 41 | | --- 42 | | ... 43 | util.wait_master(test_run, REPLICASET_3, 'box_3_a') 44 | | --- 45 | | ... 46 | util.wait_master(test_run, REPLICASET_4, 'box_4_a') 47 | | --- 48 | | ... 49 | util.map_evals(test_run, {REPLICASET_1, REPLICASET_2, REPLICASET_3, \ 50 | REPLICASET_4}, 'bootstrap_storage(\'%s\')', engine) 51 | | --- 52 | | ... 53 | 54 | -- 55 | -- The test is about the parallel rebalancer. It is not very different 56 | -- from a normal rebalancer except for the max receiving bucket 57 | -- limit. Workers should correctly handle that, and of 58 | -- course rebalancing should never stop completely. 59 | -- 60 | 61 | util.map_evals(test_run, {REPLICASET_1, REPLICASET_2}, 'add_replicaset()') 62 | | --- 63 | | ... 64 | util.map_evals(test_run, {REPLICASET_1, REPLICASET_2, REPLICASET_3}, 'add_second_replicaset()') 65 | | --- 66 | | ... 67 | -- 4 replicasets, 1 sends to 3. It has 5 workers. This means that 68 | -- throttling is inevitable. 69 | util.map_evals(test_run, {REPLICASET_1, REPLICASET_2, REPLICASET_3, REPLICASET_4}, [[\ 70 | cfg.rebalancer_max_receiving = 1\ 71 | vshard.storage.cfg(cfg, box.info.uuid)\ 72 | ]]) 73 | | --- 74 | | ... 75 | 76 | test_run:switch('box_1_a') 77 | | --- 78 | | - true 79 | | ... 80 | vshard.storage.bucket_force_create(1, 200) 81 | | --- 82 | | - true 83 | | ... 84 | t1 = fiber.time() 85 | | --- 86 | | ... 87 | wait_rebalancer_state('The cluster is balanced ok', test_run) 88 | | --- 89 | | ... 90 | t2 = fiber.time() 91 | | --- 92 | | ... 93 | -- Rebalancing should not stop. This can be checked by watching whether 94 | -- there was a sleep of REBALANCER_WORK_INTERVAL (which is 10 95 | -- seconds). 96 | (t2 - t1 < 10) or {t1, t2} 97 | | --- 98 | | - true 99 | | ... 100 | 101 | test_run:switch('default') 102 | | --- 103 | | - true 104 | | ... 105 | test_run:drop_cluster(REPLICASET_4) 106 | | --- 107 | | ... 108 | test_run:drop_cluster(REPLICASET_3) 109 | | --- 110 | | ... 111 | test_run:drop_cluster(REPLICASET_2) 112 | | --- 113 | | ... 114 | test_run:drop_cluster(REPLICASET_1) 115 | | --- 116 | | ...
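A quick sanity check of the throttling claim above, using only the numbers from config.lua and this test: the 200 buckets start on replicaset 1, and with four replicasets of equal weight the balanced target is 50 buckets each, so replicaset 1 must send 150 buckets, 50 to each of the other three. The sender runs up to rebalancer_max_sending = 5 workers, but with rebalancer_max_receiving = 1 each of the 3 receivers accepts at most one bucket at a time, so no more than 3 transfers can be in flight and at least 2 workers are always throttled.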
117 | -------------------------------------------------------------------------------- /test/rebalancer/parallel.test.lua: -------------------------------------------------------------------------------- 1 | test_run = require('test_run').new() 2 | 3 | REPLICASET_1 = { 'box_1_a', 'box_1_b' } 4 | REPLICASET_2 = { 'box_2_a', 'box_2_b' } 5 | REPLICASET_3 = { 'box_3_a', 'box_3_b' } 6 | REPLICASET_4 = { 'box_4_a', 'box_4_b' } 7 | engine = test_run:get_cfg('engine') 8 | 9 | test_run:create_cluster(REPLICASET_1, 'rebalancer') 10 | test_run:create_cluster(REPLICASET_2, 'rebalancer') 11 | test_run:create_cluster(REPLICASET_3, 'rebalancer') 12 | test_run:create_cluster(REPLICASET_4, 'rebalancer') 13 | util = require('util') 14 | util.wait_master(test_run, REPLICASET_1, 'box_1_a') 15 | util.wait_master(test_run, REPLICASET_2, 'box_2_a') 16 | util.wait_master(test_run, REPLICASET_3, 'box_3_a') 17 | util.wait_master(test_run, REPLICASET_4, 'box_4_a') 18 | util.map_evals(test_run, {REPLICASET_1, REPLICASET_2, REPLICASET_3, \ 19 | REPLICASET_4}, 'bootstrap_storage(\'%s\')', engine) 20 | 21 | -- 22 | -- The test is about the parallel rebalancer. It is not very different 23 | -- from a normal rebalancer except for the max receiving bucket 24 | -- limit. Workers should correctly handle that, and of 25 | -- course rebalancing should never stop completely. 26 | -- 27 | 28 | util.map_evals(test_run, {REPLICASET_1, REPLICASET_2}, 'add_replicaset()') 29 | util.map_evals(test_run, {REPLICASET_1, REPLICASET_2, REPLICASET_3}, 'add_second_replicaset()') 30 | -- 4 replicasets, 1 sends to 3. It has 5 workers. This means that 31 | -- throttling is inevitable. 32 | util.map_evals(test_run, {REPLICASET_1, REPLICASET_2, REPLICASET_3, REPLICASET_4}, [[\ 33 | cfg.rebalancer_max_receiving = 1\ 34 | vshard.storage.cfg(cfg, box.info.uuid)\ 35 | ]]) 36 | 37 | test_run:switch('box_1_a') 38 | vshard.storage.bucket_force_create(1, 200) 39 | t1 = fiber.time() 40 | wait_rebalancer_state('The cluster is balanced ok', test_run) 41 | t2 = fiber.time() 42 | -- Rebalancing should not stop. This can be checked by watching whether 43 | -- there was a sleep of REBALANCER_WORK_INTERVAL (which is 10 44 | -- seconds). 45 | (t2 - t1 < 10) or {t1, t2} 46 | 47 | test_run:switch('default') 48 | test_run:drop_cluster(REPLICASET_4) 49 | test_run:drop_cluster(REPLICASET_3) 50 | test_run:drop_cluster(REPLICASET_2) 51 | test_run:drop_cluster(REPLICASET_1) 52 | -------------------------------------------------------------------------------- /test/rebalancer/rebalancer_utils.lua: -------------------------------------------------------------------------------- 1 | local fiber = require('fiber') 2 | local log = require('log') 3 | 4 | local write_iterations = 0 5 | local read_iterations = 0 6 | local write_fiber = 'none' 7 | local read_fiber = 'none' 8 | local bucket_count = 200 9 | -- Set just any limit to ensure it is at least not infinite. We want our disk to 10 | -- have space even if the test runs too long for some reason.
11 | local primary_key_max = 10000 12 | 13 | local function do_write_load() 14 | while true do 15 | local bucket = write_iterations % bucket_count + 1 16 | local pk = write_iterations % primary_key_max 17 | while not vshard.router.call(bucket, 'write', 'do_replace', 18 | {{pk, bucket}}) do 19 | fiber.testcancel() 20 | end 21 | write_iterations = write_iterations + 1 22 | fiber.sleep(0.05) 23 | end 24 | end 25 | 26 | local function do_read_load() 27 | while true do 28 | while read_iterations == write_iterations do 29 | fiber.sleep(0.1) 30 | end 31 | local bucket = read_iterations % bucket_count + 1 32 | local pk = read_iterations % primary_key_max 33 | local tuples = {} 34 | local err = nil 35 | -- Read requests are retried at the replicaset level, 36 | -- so the while loop here is not strictly necessary. 37 | while #tuples == 0 do 38 | tuples, err = 39 | vshard.router.call(bucket, 'read', 'do_select', 40 | {{pk}}, {timeout = 100}) 41 | if not tuples then 42 | log.info('Error during read loading: %s', err) 43 | tuples = {} 44 | end 45 | fiber.testcancel() 46 | end 47 | assert(tuples[1][1] == read_iterations) 48 | assert(tuples[1][2] == bucket) 49 | read_iterations = read_iterations + 1 50 | end 51 | end 52 | 53 | local function stop_loading() 54 | write_fiber:cancel() 55 | assert(write_iterations > 0) 56 | while write_iterations ~= read_iterations do fiber.sleep(0.1) end 57 | read_fiber:cancel() 58 | end 59 | 60 | local function start_loading() 61 | write_iterations = 0 62 | read_iterations = 0 63 | read_fiber = fiber.create(do_read_load) 64 | write_fiber = fiber.create(do_write_load) 65 | end 66 | 67 | local function check_loading_result() 68 | for i = 0, write_iterations do 69 | local bucket = i % bucket_count + 1 70 | local tuples, err = 71 | vshard.router.call(bucket, 'read', 'do_select', {{i}}, 72 | {timeout = 100}) 73 | if (not tuples or #tuples ~= 1) and i ~= write_iterations then 74 | return i, err 75 | end 76 | end 77 | return true 78 | end 79 | 80 | return { 81 | stop_loading = stop_loading, 82 | start_loading = start_loading, 83 | check_loading_result = check_loading_result, 84 | } 85 | -------------------------------------------------------------------------------- /test/rebalancer/router_1.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | cfg = dofile('config.lua') 3 | vshard = require('vshard') 4 | os = require('os') 5 | fiber = require('fiber') 6 | 7 | box.cfg{} 8 | vshard.router.cfg(cfg) 9 | 10 | require('console').listen(os.getenv('ADMIN')) 11 | -------------------------------------------------------------------------------- /test/rebalancer/suite.ini: -------------------------------------------------------------------------------- 1 | [default] 2 | core = tarantool 3 | description = Rebalancer tests 4 | script = test.lua 5 | config = engine.cfg 6 | is_parallel = False 7 | release_disabled = errinj.test.lua 8 | lua_libs = ../lua_libs/util.lua ../lua_libs/git_util.lua 9 | ../lua_libs/storage_template.lua config.lua 10 | router_1.lua box_1_a.lua box_1_b.lua box_2_a.lua box_2_b.lua 11 | box_3_a.lua box_3_b.lua rebalancer_utils.lua 12 | -------------------------------------------------------------------------------- /test/rebalancer/test.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | 3 | require('strict').on() 4 | 5 | box.cfg{ 6 | listen = os.getenv("LISTEN"), 7 | } 8 | 9 | require('console').listen(os.getenv('ADMIN')) 10 |
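A minimal sketch of how a rebalancer test can drive rebalancer_utils.lua above from a router console (assuming vshard.router is already configured there, as in router_1.lua, and that the storages export the do_replace/do_select functions the module calls; the waiting step in the middle is test-specific):

utils = require('rebalancer_utils')
utils.start_loading()
-- Reconfigure the cluster here and wait until the rebalancer reports
-- that the cluster is balanced.
utils.stop_loading()
utils.check_loading_result() -- true on success, else the failed iteration and error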
-------------------------------------------------------------------------------- /test/reload_evolution/storage_1_a.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | local util = require('util') 3 | NAME = require('fio').basename(arg[0], '.lua') 4 | local source_path = arg[1] 5 | original_package_path = package.path 6 | if NAME == 'storage_2_a' then 7 | package.path = string.format('%s/?.lua;%s/?/init.lua;%s', source_path, 8 | source_path, package.path) 9 | end 10 | require('storage_template') 11 | -------------------------------------------------------------------------------- /test/reload_evolution/storage_1_b.lua: -------------------------------------------------------------------------------- 1 | storage_1_a.lua -------------------------------------------------------------------------------- /test/reload_evolution/storage_2_a.lua: -------------------------------------------------------------------------------- 1 | storage_1_a.lua -------------------------------------------------------------------------------- /test/reload_evolution/storage_2_b.lua: -------------------------------------------------------------------------------- 1 | storage_1_a.lua -------------------------------------------------------------------------------- /test/reload_evolution/suite.ini: -------------------------------------------------------------------------------- 1 | [default] 2 | core = tarantool 3 | description = Reload evolution tests 4 | script = test.lua 5 | is_parallel = False 6 | lua_libs = ../lua_libs/storage_template.lua ../lua_libs/util.lua ../lua_libs/git_util.lua ../../example/localcfg.lua 7 | -------------------------------------------------------------------------------- /test/reload_evolution/test.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | 3 | require('strict').on() 4 | 5 | box.cfg{ 6 | listen = os.getenv("LISTEN"), 7 | } 8 | 9 | require('console').listen(os.getenv('ADMIN')) 10 | -------------------------------------------------------------------------------- /test/replicaset-luatest/suite.ini: -------------------------------------------------------------------------------- 1 | [default] 2 | core = luatest 3 | description = Replicaset tests 4 | is_parallel = True 5 | release_disabled = 6 | -------------------------------------------------------------------------------- /test/router-luatest/suite.ini: -------------------------------------------------------------------------------- 1 | [default] 2 | core = luatest 3 | description = Router tests 4 | is_parallel = True 5 | release_disabled = 6 | -------------------------------------------------------------------------------- /test/router/boot_replica_first.result: -------------------------------------------------------------------------------- 1 | -- test-run result file version 2 2 | test_run = require('test_run').new() 3 | | --- 4 | | ... 5 | REPLICASET_1 = { 'box_1_a', 'box_1_b', 'box_1_c' } 6 | | --- 7 | | ... 8 | test_run:create_cluster(REPLICASET_1, 'router', {args = 'boot_before_cfg'}) 9 | | --- 10 | | ... 11 | util = require('util') 12 | | --- 13 | | ... 14 | util.wait_master(test_run, REPLICASET_1, 'box_1_a') 15 | | --- 16 | | ... 17 | _ = test_run:cmd("create server router with script='router/router_2.lua'") 18 | | --- 19 | | ... 20 | _ = test_run:cmd("start server router") 21 | | --- 22 | | ... 23 | 24 | -- 25 | -- gh-237: replica should be able to boot before master. 
Before the issue was 26 | -- fixed, replica always tried to install a trigger on _bucket space even when 27 | -- it was not created on a master yet - that led to an exception in 28 | -- storage.cfg. Now it should not install the trigger at all, because it is not 29 | -- needed on a replica for anything anyway. 30 | -- 31 | 32 | test_run:switch('box_1_b') 33 | | --- 34 | | - true 35 | | ... 36 | vshard.storage.cfg(cfg, instance_uuid) 37 | | --- 38 | | ... 39 | -- _bucket is not created yet. Will fail. 40 | util.check_error(vshard.storage.call, 1, 'read', 'echo', {100}) 41 | | --- 42 | | - attempt to index field '_bucket' (a nil value) 43 | | ... 44 | 45 | -- While waiting for the schema, gracefully handle deletions from _schema. 46 | ro = box.cfg.read_only 47 | | --- 48 | | ... 49 | box.cfg{read_only = false} 50 | | --- 51 | | ... 52 | box.space._schema:insert({'gh-276'}) 53 | | --- 54 | | - ['gh-276'] 55 | | ... 56 | box.space._schema:delete({'gh-276'}) 57 | | --- 58 | | - ['gh-276'] 59 | | ... 60 | box.cfg{read_only = ro} 61 | | --- 62 | | ... 63 | 64 | test_run:switch('default') 65 | | --- 66 | | - true 67 | | ... 68 | util.map_evals(test_run, {REPLICASET_1}, 'bootstrap_storage(\'memtx\')') 69 | | --- 70 | | ... 71 | 72 | test_run:switch('box_1_a') 73 | | --- 74 | | - true 75 | | ... 76 | vshard.storage.cfg(cfg, instance_uuid) 77 | | --- 78 | | ... 79 | 80 | test_run:switch('box_1_b') 81 | | --- 82 | | - true 83 | | ... 84 | test_run:wait_lsn('box_1_b', 'box_1_a') 85 | | --- 86 | | ... 87 | -- Fails, but gracefully. Because the bucket is not found here. 88 | vshard.storage.call(1, 'read', 'echo', {100}) 89 | | --- 90 | | - null 91 | | - bucket_id: 1 92 | | reason: Not found 93 | | code: 1 94 | | type: ShardingError 95 | | message: 'Cannot perform action with bucket 1, reason: Not found' 96 | | name: WRONG_BUCKET 97 | | ... 98 | -- 99 | -- gh-276: should have triggers. This is important for proper update of caches 100 | -- and in the future for discarding refs in the scope of gh-173. 101 | -- 102 | assert(#box.space._bucket:on_replace() == 1) 103 | | --- 104 | | - true 105 | | ... 106 | 107 | test_run:switch('router') 108 | | --- 109 | | - true 110 | | ... 111 | vshard.router.bootstrap() 112 | | --- 113 | | - true 114 | | ... 115 | vshard.router.callro(1, 'echo', {100}) 116 | | --- 117 | | - 100 118 | | ... 119 | 120 | test_run:switch("default") 121 | | --- 122 | | - true 123 | | ... 124 | test_run:cmd('stop server router') 125 | | --- 126 | | - true 127 | | ... 128 | test_run:cmd('delete server router') 129 | | --- 130 | | - true 131 | | ... 132 | test_run:drop_cluster(REPLICASET_1) 133 | | --- 134 | | ... 135 | -------------------------------------------------------------------------------- /test/router/boot_replica_first.test.lua: -------------------------------------------------------------------------------- 1 | test_run = require('test_run').new() 2 | REPLICASET_1 = { 'box_1_a', 'box_1_b', 'box_1_c' } 3 | test_run:create_cluster(REPLICASET_1, 'router', {args = 'boot_before_cfg'}) 4 | util = require('util') 5 | util.wait_master(test_run, REPLICASET_1, 'box_1_a') 6 | _ = test_run:cmd("create server router with script='router/router_2.lua'") 7 | _ = test_run:cmd("start server router") 8 | 9 | -- 10 | -- gh-237: replica should be able to boot before master. Before the issue was 11 | -- fixed, replica always tried to install a trigger on _bucket space even when 12 | -- it was not created on a master yet - that led to an exception in 13 | -- storage.cfg.
Now it should not install the trigger at all, because it is 14 | -- not needed on a replica for anything anyway. 15 | -- 16 | 17 | test_run:switch('box_1_b') 18 | vshard.storage.cfg(cfg, instance_uuid) 19 | -- _bucket is not created yet. Will fail. 20 | util.check_error(vshard.storage.call, 1, 'read', 'echo', {100}) 21 | 22 | -- While waiting for the schema, gracefully handle deletions from _schema. 23 | ro = box.cfg.read_only 24 | box.cfg{read_only = false} 25 | box.space._schema:insert({'gh-276'}) 26 | box.space._schema:delete({'gh-276'}) 27 | box.cfg{read_only = ro} 28 | 29 | test_run:switch('default') 30 | util.map_evals(test_run, {REPLICASET_1}, 'bootstrap_storage(\'memtx\')') 31 | 32 | test_run:switch('box_1_a') 33 | vshard.storage.cfg(cfg, instance_uuid) 34 | 35 | test_run:switch('box_1_b') 36 | test_run:wait_lsn('box_1_b', 'box_1_a') 37 | -- Fails, but gracefully, because the bucket is not found here. 38 | vshard.storage.call(1, 'read', 'echo', {100}) 39 | -- 40 | -- gh-276: should have triggers. This is important for proper updates of caches 41 | -- and, in the future, for discarding refs in the scope of gh-173. 42 | -- 43 | assert(#box.space._bucket:on_replace() == 1) 44 | 45 | test_run:switch('router') 46 | vshard.router.bootstrap() 47 | vshard.router.callro(1, 'echo', {100}) 48 | 49 | test_run:switch("default") 50 | test_run:cmd('stop server router') 51 | test_run:cmd('delete server router') 52 | test_run:drop_cluster(REPLICASET_1) 53 | -------------------------------------------------------------------------------- /test/router/box_1_a.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | cfg = dofile('config.lua') 3 | require('storage_template') 4 | -------------------------------------------------------------------------------- /test/router/box_1_b.lua: -------------------------------------------------------------------------------- 1 | box_1_a.lua -------------------------------------------------------------------------------- /test/router/box_1_c.lua: -------------------------------------------------------------------------------- 1 | box_1_a.lua -------------------------------------------------------------------------------- /test/router/config.lua: -------------------------------------------------------------------------------- 1 | local util = require('util') 2 | 3 | return { 4 | sharding = { 5 | [util.replicasets[1]] = { 6 | replicas = { 7 | [util.name_to_uuid.box_1_a] = { 8 | uri = 'storage:storage@127.0.0.1:3301', 9 | name = 'box_1_a', 10 | master = true, 11 | }, 12 | [util.name_to_uuid.box_1_b] = { 13 | uri = 'storage:storage@127.0.0.1:3302', 14 | name = 'box_1_b', 15 | }, 16 | [util.name_to_uuid.box_1_c] = { 17 | uri = 'storage:storage@127.0.0.1:3303', 18 | name = 'box_1_c', 19 | }, 20 | } 21 | } 22 | }, 23 | replication_connect_quorum = 0, 24 | } 25 | -------------------------------------------------------------------------------- /test/router/empty_cluster.lua: -------------------------------------------------------------------------------- 1 | test.lua -------------------------------------------------------------------------------- /test/router/empty_cluster.result: -------------------------------------------------------------------------------- 1 | test_run = require('test_run').new() 2 | --- 3 | ... 4 | test_run:cmd("create server router_1 with script='router/empty_cluster.lua'") 5 | --- 6 | - true 7 | ... 8 | test_run:cmd("start server router_1") 9 | --- 10 | - true 11 | ...
12 | test_run:switch('router_1') 13 | --- 14 | - true 15 | ... 16 | -- Start the database with sharding 17 | vshard = require('vshard') 18 | --- 19 | ... 20 | vshard.router.cfg({sharding = {}}) 21 | --- 22 | ... 23 | -- 24 | -- Check that failover works ok when a replicaset is set, but has 25 | -- no replicas. 26 | -- 27 | fiber = require('fiber') 28 | --- 29 | ... 30 | sharding = { ['cbf06940-0790-498b-948d-042b62cf3d29'] = { replicas = {} } } 31 | --- 32 | ... 33 | vshard.router.cfg({sharding = sharding}) 34 | --- 35 | ... 36 | for i = 1, 10 do vshard.router.static.failover_fiber:wakeup() fiber.sleep(0.001) end 37 | --- 38 | ... 39 | test_run:switch('default') 40 | --- 41 | - true 42 | ... 43 | test_run:cmd("stop server router_1") 44 | --- 45 | - true 46 | ... 47 | test_run:cmd("cleanup server router_1") 48 | --- 49 | - true 50 | ... 51 | -------------------------------------------------------------------------------- /test/router/empty_cluster.test.lua: -------------------------------------------------------------------------------- 1 | test_run = require('test_run').new() 2 | test_run:cmd("create server router_1 with script='router/empty_cluster.lua'") 3 | test_run:cmd("start server router_1") 4 | test_run:switch('router_1') 5 | 6 | -- Start the database with sharding 7 | vshard = require('vshard') 8 | vshard.router.cfg({sharding = {}}) 9 | 10 | -- 11 | -- Check that failover works ok when a replicaset is set, but has 12 | -- no replicas. 13 | -- 14 | fiber = require('fiber') 15 | sharding = { ['cbf06940-0790-498b-948d-042b62cf3d29'] = { replicas = {} } } 16 | vshard.router.cfg({sharding = sharding}) 17 | for i = 1, 10 do vshard.router.static.failover_fiber:wakeup() fiber.sleep(0.001) end 18 | 19 | test_run:switch('default') 20 | test_run:cmd("stop server router_1") 21 | test_run:cmd("cleanup server router_1") 22 | -------------------------------------------------------------------------------- /test/router/exponential_timeout.result: -------------------------------------------------------------------------------- 1 | test_run = require('test_run').new() 2 | --- 3 | ... 4 | REPLICASET_1 = { 'storage_1_a', 'storage_1_b' } 5 | --- 6 | ... 7 | REPLICASET_2 = { 'storage_2_a', 'storage_2_b' } 8 | --- 9 | ... 10 | test_run:create_cluster(REPLICASET_1, 'router') 11 | --- 12 | ... 13 | test_run:create_cluster(REPLICASET_2, 'router') 14 | --- 15 | ... 16 | util = require('util') 17 | --- 18 | ... 19 | util.wait_master(test_run, REPLICASET_1, 'storage_1_a') 20 | --- 21 | ... 22 | util.wait_master(test_run, REPLICASET_2, 'storage_2_a') 23 | --- 24 | ... 25 | util.map_evals(test_run, {REPLICASET_1, REPLICASET_2}, 'bootstrap_storage(\'memtx\')') 26 | --- 27 | ... 28 | _ = test_run:cmd("create server router_1 with script='router/router_1.lua'") 29 | --- 30 | ... 31 | -- Discovery algorithm and failover change sometimes and should not affect the 32 | -- exponential timeout test. 33 | _ = test_run:cmd("start server router_1 with " .. \ 34 | "args='discovery_disable failover_disable'") 35 | --- 36 | ... 37 | _ = test_run:switch('router_1') 38 | --- 39 | ... 40 | util = require('util') 41 | --- 42 | ... 43 | rs1 = vshard.router.static.replicasets[util.replicasets[1]] 44 | --- 45 | ... 46 | rs2 = vshard.router.static.replicasets[util.replicasets[2]] 47 | --- 48 | ... 49 | util.collect_timeouts(rs1) 50 | --- 51 | - - fail: 0 52 | ok: 0 53 | timeout: 0.5 54 | - fail: 0 55 | ok: 0 56 | timeout: 0.5 57 | ...
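-- In the tables above and below, 'fail' and 'ok' count consecutive failed and
-- successful requests per replica, and 'timeout' is that replica's current
-- network timeout (CALL_TIMEOUT_MIN = 0.5 initially). The sketch below
-- illustrates the adaptation rule this test exercises; the names and
-- thresholds are illustrative only, not vshard's internal API.
local MIN_TIMEOUT, MAX_TIMEOUT = 0.5, 3600

local function new_replica_state()
    return {ok = 0, fail = 0, timeout = MIN_TIMEOUT}
end

local function register_result(replica, is_ok)
    if is_ok then
        replica.ok, replica.fail = replica.ok + 1, 0
        -- Only a long run of successes lets the timeout shrink back:
        -- this is the oscillation protection checked below.
        if replica.ok >= 10 then
            replica.timeout = math.max(MIN_TIMEOUT, replica.timeout / 2)
            replica.ok = 0
        end
    else
        replica.fail, replica.ok = replica.fail + 1, 0
        -- Repeated failures grow the timeout exponentially, with a cap.
        if replica.fail >= 2 then
            replica.timeout = math.min(MAX_TIMEOUT, replica.timeout * 2)
        end
    end
end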
58 | util.collect_timeouts(rs2) 59 | --- 60 | - - fail: 0 61 | ok: 0 62 | timeout: 0.5 63 | - fail: 0 64 | ok: 0 65 | timeout: 0.5 66 | ... 67 | -- 68 | -- Test the case when the timeout is already minimal and tries to 69 | -- decrease. 70 | -- 71 | for i = 1, 8 do rs1:callrw('echo') end 72 | --- 73 | ... 74 | util.collect_timeouts(rs1) 75 | --- 76 | - - fail: 0 77 | ok: 0 78 | timeout: 0.5 79 | - fail: 0 80 | ok: 8 81 | timeout: 0.5 82 | ... 83 | _ = rs1:callrw('echo') 84 | --- 85 | ... 86 | util.collect_timeouts(rs1) 87 | --- 88 | - - fail: 0 89 | ok: 0 90 | timeout: 0.5 91 | - fail: 0 92 | ok: 9 93 | timeout: 0.5 94 | ... 95 | -- 96 | -- Test oscillation protection. When a timeout is increased, it 97 | -- must not decrease until 10 successful requests. 98 | -- 99 | for i = 1, 2 do rs1:callrw('sleep', {vshard.consts.CALL_TIMEOUT_MIN + 0.1}) end 100 | --- 101 | ... 102 | util.collect_timeouts(rs1) 103 | --- 104 | - - fail: 0 105 | ok: 0 106 | timeout: 0.5 107 | - fail: 2 108 | ok: 0 109 | timeout: 1 110 | ... 111 | for i = 1, 9 do rs1:callrw('echo') end 112 | --- 113 | ... 114 | util.collect_timeouts(rs1) 115 | --- 116 | - - fail: 0 117 | ok: 0 118 | timeout: 0.5 119 | - fail: 0 120 | ok: 9 121 | timeout: 1 122 | ... 123 | _ = rs1:callrw('sleep', {vshard.consts.CALL_TIMEOUT_MIN * 2 + 0.1}) 124 | --- 125 | ... 126 | util.collect_timeouts(rs1) 127 | --- 128 | - - fail: 0 129 | ok: 0 130 | timeout: 0.5 131 | - fail: 1 132 | ok: 0 133 | timeout: 1 134 | ... 135 | -- Ok, because the new timeout was doubled. 136 | _ = rs1:callrw('sleep', {vshard.consts.CALL_TIMEOUT_MIN * 1.8}) 137 | --- 138 | ... 139 | util.collect_timeouts(rs1) 140 | --- 141 | - - fail: 0 142 | ok: 0 143 | timeout: 0.5 144 | - fail: 0 145 | ok: 1 146 | timeout: 1 147 | ... 148 | for i = 1, 9 do rs1:callrw('echo') end 149 | --- 150 | ... 151 | util.collect_timeouts(rs1) 152 | --- 153 | - - fail: 0 154 | ok: 0 155 | timeout: 0.5 156 | - fail: 0 157 | ok: 1 158 | timeout: 0.5 159 | ... 160 | _ = test_run:switch("default") 161 | --- 162 | ... 163 | _ = test_run:cmd("stop server router_1") 164 | --- 165 | ... 166 | _ = test_run:cmd("cleanup server router_1") 167 | --- 168 | ... 169 | test_run:drop_cluster(REPLICASET_1) 170 | --- 171 | ... 172 | test_run:drop_cluster(REPLICASET_2) 173 | --- 174 | ... 175 | -------------------------------------------------------------------------------- /test/router/exponential_timeout.test.lua: -------------------------------------------------------------------------------- 1 | test_run = require('test_run').new() 2 | 3 | REPLICASET_1 = { 'storage_1_a', 'storage_1_b' } 4 | REPLICASET_2 = { 'storage_2_a', 'storage_2_b' } 5 | 6 | test_run:create_cluster(REPLICASET_1, 'router') 7 | test_run:create_cluster(REPLICASET_2, 'router') 8 | util = require('util') 9 | util.wait_master(test_run, REPLICASET_1, 'storage_1_a') 10 | util.wait_master(test_run, REPLICASET_2, 'storage_2_a') 11 | util.map_evals(test_run, {REPLICASET_1, REPLICASET_2}, 'bootstrap_storage(\'memtx\')') 12 | _ = test_run:cmd("create server router_1 with script='router/router_1.lua'") 13 | -- Discovery algorithm and failover change sometimes and should not affect the 14 | -- exponential timeout test. 15 | _ = test_run:cmd("start server router_1 with " .. 
\ 16 | "args='discovery_disable failover_disable'") 17 | _ = test_run:switch('router_1') 18 | util = require('util') 19 | 20 | rs1 = vshard.router.static.replicasets[util.replicasets[1]] 21 | rs2 = vshard.router.static.replicasets[util.replicasets[2]] 22 | 23 | util.collect_timeouts(rs1) 24 | util.collect_timeouts(rs2) 25 | 26 | -- 27 | -- Test a case, when timeout is already minimal and tries to 28 | -- decrease. 29 | -- 30 | for i = 1, 8 do rs1:callrw('echo') end 31 | util.collect_timeouts(rs1) 32 | _ = rs1:callrw('echo') 33 | util.collect_timeouts(rs1) 34 | 35 | -- 36 | -- Test oscillation protection. When a timeout is increased, it 37 | -- must not decrease until 10 success requests. 38 | -- 39 | for i = 1, 2 do rs1:callrw('sleep', {vshard.consts.CALL_TIMEOUT_MIN + 0.1}) end 40 | util.collect_timeouts(rs1) 41 | 42 | for i = 1, 9 do rs1:callrw('echo') end 43 | util.collect_timeouts(rs1) 44 | _ = rs1:callrw('sleep', {vshard.consts.CALL_TIMEOUT_MIN * 2 + 0.1}) 45 | util.collect_timeouts(rs1) 46 | -- Ok, because new timeout is increased twice. 47 | _ = rs1:callrw('sleep', {vshard.consts.CALL_TIMEOUT_MIN * 1.8}) 48 | util.collect_timeouts(rs1) 49 | for i = 1, 9 do rs1:callrw('echo') end 50 | util.collect_timeouts(rs1) 51 | 52 | _ = test_run:switch("default") 53 | _ = test_run:cmd("stop server router_1") 54 | _ = test_run:cmd("cleanup server router_1") 55 | test_run:drop_cluster(REPLICASET_1) 56 | test_run:drop_cluster(REPLICASET_2) 57 | -------------------------------------------------------------------------------- /test/router/reconnect_to_master.test.lua: -------------------------------------------------------------------------------- 1 | test_run = require('test_run').new() 2 | REPLICASET_1 = { 'storage_1_a', 'storage_1_b' } 3 | REPLICASET_2 = { 'storage_2_a', 'storage_2_b' } 4 | 5 | test_run:create_cluster(REPLICASET_1, 'router') 6 | test_run:create_cluster(REPLICASET_2, 'router') 7 | util = require('util') 8 | util.wait_master(test_run, REPLICASET_1, 'storage_1_a') 9 | util.wait_master(test_run, REPLICASET_2, 'storage_2_a') 10 | 11 | -- 12 | -- gh-51: discovery must work with replicas. 13 | -- Create 10 buckets and replicate them. Then start router and 14 | -- down master. Router discovery fiber must use replica to find 15 | -- buckets. 16 | -- 17 | _ = test_run:switch('storage_1_a') 18 | _bucket = box.space._bucket 19 | vshard.storage.bucket_force_create(1, 10) 20 | 21 | _ = test_run:switch('storage_1_b') 22 | _bucket = box.space._bucket 23 | while _bucket:count() ~= 10 do fiber.sleep(0.1) end 24 | 25 | -- Break a connection to a master. 
26 | _ = test_run:cmd('stop server storage_1_a') 27 | 28 | _ = test_run:cmd("create server router_1 with script='router/router_1.lua'") 29 | _ = test_run:cmd("start server router_1 with args='discovery_disable'") 30 | 31 | _ = test_run:switch('router_1') 32 | util = require('util') 33 | 34 | reps = vshard.router.static.replicasets 35 | _ = test_run:cmd("setopt delimiter ';'") 36 | function is_disconnected() 37 | for i, rep in pairs(reps) do 38 | if rep.master.conn == nil or rep.master.conn.state ~= 'active' then 39 | return true 40 | end 41 | end 42 | return false 43 | end; 44 | function count_known_buckets() 45 | local known_buckets = 0 46 | for _, id in pairs(vshard.router.static.route_map) do 47 | known_buckets = known_buckets + 1 48 | end 49 | return known_buckets 50 | end; 51 | _ = test_run:cmd("setopt delimiter ''"); 52 | count_known_buckets() 53 | vshard.router.discovery_set('on') 54 | fiber = require('fiber') 55 | -- Use a replica to find buckets. 56 | while count_known_buckets() ~= 10 do vshard.router.discovery_wakeup() fiber.sleep(0.1) end 57 | 58 | -- No master in replica set 1. 59 | is_disconnected() 60 | 61 | -- Wait until the replica is connected to test alerts about an unavailable 62 | -- master. 63 | while vshard.router.static.replicasets[util.replicasets[1]].replica == nil do fiber.sleep(0.1) end 64 | vshard.router.info() 65 | 66 | -- Return the master. 67 | _ = test_run:cmd('start server storage_1_a') 68 | max_iters = 1000 69 | i = 0 70 | while is_disconnected() and i < max_iters do i = i + 1 fiber.sleep(0.1) end 71 | 72 | -- Master connection is active again. 73 | is_disconnected() 74 | 75 | -- 76 | -- gh-245: dynamic uri reconfiguration didn't work - even if URI was changed in 77 | -- the config for any instance, it used the old connection, because reconfiguration 78 | -- compared connections by UUID instead of URI. 79 | -- 80 | util = require('util') 81 | -- First, remove the storage_1_a connection from the router. 82 | rs1_uuid = util.replicasets[1] 83 | rs1_cfg = cfg.sharding[rs1_uuid] 84 | cfg.sharding[rs1_uuid] = nil 85 | vshard.router.cfg(cfg) 86 | -- Now break the URI in the config. 87 | old_uri = rs1_cfg.replicas[util.name_to_uuid.storage_1_a].uri 88 | rs1_cfg.replicas[util.name_to_uuid.storage_1_a].uri = 'https://bad_uri.com:123' 89 | -- Apply the bad config. 90 | cfg.sharding[rs1_uuid] = rs1_cfg 91 | vshard.router.cfg(cfg) 92 | -- Should fail - master is not available because of the bad URI. 93 | res, err = vshard.router.callrw(1, 'echo', {1}) 94 | res == nil and err ~= nil 95 | -- Repair the config. 96 | rs1_cfg.replicas[util.name_to_uuid.storage_1_a].uri = old_uri 97 | vshard.router.cfg(cfg) 98 | -- Should drop the old connection object and connect fine. 99 | vshard.router.callrw(1, 'echo', {1}) 100 | 101 | _ = test_run:switch("default") 102 | _ = test_run:cmd('stop server router_1') 103 | _ = test_run:cmd('cleanup server router_1') 104 | test_run:drop_cluster(REPLICASET_2) 105 | test_run:drop_cluster(REPLICASET_1) 106 | -------------------------------------------------------------------------------- /test/router/retry_reads.result: -------------------------------------------------------------------------------- 1 | test_run = require('test_run').new() 2 | --- 3 | ... 4 | REPLICASET_1 = { 'storage_1_a', 'storage_1_b' } 5 | --- 6 | ... 7 | REPLICASET_2 = { 'storage_2_a', 'storage_2_b' } 8 | --- 9 | ... 10 | test_run:create_cluster(REPLICASET_1, 'router') 11 | --- 12 | ... 13 | test_run:create_cluster(REPLICASET_2, 'router') 14 | --- 15 | ...
16 | util = require('util') 17 | --- 18 | ... 19 | util.wait_master(test_run, REPLICASET_1, 'storage_1_a') 20 | --- 21 | ... 22 | util.wait_master(test_run, REPLICASET_2, 'storage_2_a') 23 | --- 24 | ... 25 | util.map_evals(test_run, {REPLICASET_1, REPLICASET_2}, 'bootstrap_storage(\'memtx\')') 26 | --- 27 | ... 28 | _ = test_run:cmd("create server router_1 with script='router/router_1.lua'") 29 | --- 30 | ... 31 | -- Discovery algorithm and failover change sometimes and should not affect 32 | -- this test. 33 | _ = test_run:cmd("start server router_1 with " .. \ 34 | "args='discovery_disable failover_disable'") 35 | --- 36 | ... 37 | _ = test_run:switch('router_1') 38 | --- 39 | ... 40 | util = require('util') 41 | --- 42 | ... 43 | rs1 = vshard.router.static.replicasets[util.replicasets[1]] 44 | --- 45 | ... 46 | min_timeout = vshard.consts.CALL_TIMEOUT_MIN 47 | --- 48 | ... 49 | -- 50 | -- Try two read requests with execution time = MIN_TIMEOUT + 0.5. 51 | -- It leads to an increased network timeout. 52 | -- 53 | util.collect_timeouts(rs1) 54 | --- 55 | - - fail: 0 56 | ok: 0 57 | timeout: 0.5 58 | - fail: 0 59 | ok: 0 60 | timeout: 0.5 61 | ... 62 | _ = rs1:callro('sleep', {min_timeout + 0.5}, {timeout = min_timeout}) 63 | --- 64 | ... 65 | _ = rs1:callro('sleep', {min_timeout + 0.5}, {timeout = min_timeout}) 66 | --- 67 | ... 68 | util.collect_timeouts(rs1) 69 | --- 70 | - - fail: 0 71 | ok: 0 72 | timeout: 0.5 73 | - fail: 2 74 | ok: 0 75 | timeout: 1 76 | ... 77 | for i = 1, 9 do rs1:callro('echo') end 78 | --- 79 | ... 80 | util.collect_timeouts(rs1) 81 | --- 82 | - - fail: 0 83 | ok: 0 84 | timeout: 0.5 85 | - fail: 0 86 | ok: 9 87 | timeout: 1 88 | ... 89 | -- 90 | -- Ensure that LuaJIT errors are not retried. 91 | -- 92 | fiber = require('fiber') 93 | --- 94 | ... 95 | start = fiber.time() 96 | --- 97 | ... 98 | _, e = rs1:callro('raise_luajit_error', {}, {timeout = 10}) 99 | --- 100 | ... 101 | string.match(e.message, 'assertion') 102 | --- 103 | - assertion 104 | ... 105 | fiber.time() - start < 1 106 | --- 107 | - true 108 | ... 109 | start = fiber.time() 110 | --- 111 | ... 112 | _, e = rs1:callro('raise_client_error', {}, {timeout = 5}) 113 | --- 114 | ... 115 | fiber.time() - start < 1 116 | --- 117 | - true 118 | ... 119 | util.portable_error(e) 120 | --- 121 | - type: ClientError 122 | message: Unknown error 123 | ... 124 | _, e = rs1:callro('sleep', {1}, {timeout = 0.0001}) 125 | --- 126 | ... 127 | util.is_timeout_error(e) 128 | --- 129 | - true 130 | ... 131 | -- 132 | -- Do not send multiple requests during a timeout - it breaks long 133 | -- polling requests. 134 | -- 135 | _ = rs1:callro('sleep', {4}, {timeout = 100}) 136 | --- 137 | ... 138 | _ 139 | --- 140 | - true 141 | ... 142 | _ = test_run:switch("default") 143 | --- 144 | ... 145 | _ = test_run:cmd("stop server router_1") 146 | --- 147 | ... 148 | _ = test_run:cmd("cleanup server router_1") 149 | --- 150 | ... 151 | test_run:drop_cluster(REPLICASET_1) 152 | --- 153 | ... 154 | test_run:drop_cluster(REPLICASET_2) 155 | --- 156 | ...
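-- The behavior recorded above reduces to one rule: reads are retried only on
-- timeout-class errors and only within the caller's own time budget; Lua and
-- client errors surface immediately. A hypothetical usage sketch, reusing the
-- rs1 and util objects of this test:
local res, err = rs1:callro('echo', {1}, {timeout = 2})
if res == nil and err ~= nil then
    if util.is_timeout_error(err) then
        -- The whole 2-second budget was spent on retries.
    else
        -- A non-retriable error, reported after the first attempt.
    end
end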
157 | -------------------------------------------------------------------------------- /test/router/retry_reads.test.lua: -------------------------------------------------------------------------------- 1 | test_run = require('test_run').new() 2 | 3 | REPLICASET_1 = { 'storage_1_a', 'storage_1_b' } 4 | REPLICASET_2 = { 'storage_2_a', 'storage_2_b' } 5 | 6 | test_run:create_cluster(REPLICASET_1, 'router') 7 | test_run:create_cluster(REPLICASET_2, 'router') 8 | util = require('util') 9 | util.wait_master(test_run, REPLICASET_1, 'storage_1_a') 10 | util.wait_master(test_run, REPLICASET_2, 'storage_2_a') 11 | util.map_evals(test_run, {REPLICASET_1, REPLICASET_2}, 'bootstrap_storage(\'memtx\')') 12 | _ = test_run:cmd("create server router_1 with script='router/router_1.lua'") 13 | -- Discovery algorithm and failover change sometimes and should not affect 14 | -- this test. 15 | _ = test_run:cmd("start server router_1 with " .. \ 16 | "args='discovery_disable failover_disable'") 17 | _ = test_run:switch('router_1') 18 | util = require('util') 19 | 20 | rs1 = vshard.router.static.replicasets[util.replicasets[1]] 21 | min_timeout = vshard.consts.CALL_TIMEOUT_MIN 22 | 23 | -- 24 | -- Try two read requests with execution time = MIN_TIMEOUT + 0.5. 25 | -- It leads to an increased network timeout. 26 | -- 27 | util.collect_timeouts(rs1) 28 | _ = rs1:callro('sleep', {min_timeout + 0.5}, {timeout = min_timeout}) 29 | _ = rs1:callro('sleep', {min_timeout + 0.5}, {timeout = min_timeout}) 30 | util.collect_timeouts(rs1) 31 | for i = 1, 9 do rs1:callro('echo') end 32 | util.collect_timeouts(rs1) 33 | 34 | -- 35 | -- Ensure that LuaJIT errors are not retried. 36 | -- 37 | fiber = require('fiber') 38 | start = fiber.time() 39 | _, e = rs1:callro('raise_luajit_error', {}, {timeout = 10}) 40 | string.match(e.message, 'assertion') 41 | fiber.time() - start < 1 42 | 43 | start = fiber.time() 44 | _, e = rs1:callro('raise_client_error', {}, {timeout = 5}) 45 | fiber.time() - start < 1 46 | util.portable_error(e) 47 | 48 | _, e = rs1:callro('sleep', {1}, {timeout = 0.0001}) 49 | util.is_timeout_error(e) 50 | 51 | -- 52 | -- Do not send multiple requests during a timeout - it breaks long 53 | -- polling requests.
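-- For example, the 4-second 'sleep' call below must reach the storage exactly
-- once; re-sending it on an internal timer would execute it a second time.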
54 | -- 55 | _ = rs1:callro('sleep', {4}, {timeout = 100}) 56 | _ 57 | 58 | _ = test_run:switch("default") 59 | _ = test_run:cmd("stop server router_1") 60 | _ = test_run:cmd("cleanup server router_1") 61 | test_run:drop_cluster(REPLICASET_1) 62 | test_run:drop_cluster(REPLICASET_2) 63 | -------------------------------------------------------------------------------- /test/router/router_1.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | 3 | require('strict').on() 4 | fiber = require('fiber') 5 | 6 | -- Check if we are running under test-run 7 | if os.getenv('ADMIN') then 8 | test_run = require('test_run').new() 9 | require('console').listen(os.getenv('ADMIN')) 10 | end 11 | 12 | replicasets = {'cbf06940-0790-498b-948d-042b62cf3d29', 13 | 'ac522f65-aa94-4134-9f64-51ee384f1a54'} 14 | 15 | -- Call a configuration provider 16 | cfg = dofile('localcfg.lua') 17 | if arg[1] == 'discovery_disable' then 18 | cfg.discovery_mode = 'off' 19 | end 20 | 21 | -- Start the database with sharding 22 | vshard = require('vshard') 23 | 24 | if arg[2] == 'failover_disable' then 25 | vshard.router.internal.errinj.ERRINJ_FAILOVER_DELAY = true 26 | end 27 | 28 | vshard.router.cfg(cfg) 29 | 30 | if arg[2] == 'failover_disable' then 31 | while vshard.router.internal.errinj.ERRINJ_FAILOVER_DELAY ~= 'in' do 32 | vshard.router.static.failover_fiber:wakeup() 33 | fiber.sleep(0.01) 34 | end 35 | end 36 | -------------------------------------------------------------------------------- /test/router/router_2.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | cfg = dofile('config.lua') 3 | require('console').listen(os.getenv('ADMIN')) 4 | vshard = require('vshard') 5 | vshard.router.cfg(cfg) 6 | -------------------------------------------------------------------------------- /test/router/router_3.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | fiber = require('fiber') 3 | cfg = dofile('config.lua') 4 | require('console').listen(os.getenv('ADMIN')) 5 | vshard = require('vshard') 6 | box.cfg{} 7 | -------------------------------------------------------------------------------- /test/router/router_and_rebalancing.result: -------------------------------------------------------------------------------- 1 | test_run = require('test_run').new() 2 | --- 3 | ... 4 | REPLICASET_1 = { 'storage_1_a', 'storage_1_b' } 5 | --- 6 | ... 7 | REPLICASET_2 = { 'storage_2_a', 'storage_2_b' } 8 | --- 9 | ... 10 | test_run:create_cluster(REPLICASET_1, 'router') 11 | --- 12 | ... 13 | test_run:create_cluster(REPLICASET_2, 'router') 14 | --- 15 | ... 16 | util = require('util') 17 | --- 18 | ... 19 | util.wait_master(test_run, REPLICASET_1, 'storage_1_a') 20 | --- 21 | ... 22 | util.wait_master(test_run, REPLICASET_2, 'storage_2_a') 23 | --- 24 | ... 25 | _ = test_run:cmd("create server router_1 with script='router/router_1.lua'") 26 | --- 27 | ... 28 | _ = test_run:cmd("start server router_1") 29 | --- 30 | ... 31 | -- 32 | -- gh-97: unknown bucket_count in router.info() can be < 0 during 33 | -- rebalancing. 34 | -- 35 | _ = test_run:switch('router_1') 36 | --- 37 | ... 38 | util = require('util') 39 | --- 40 | ... 41 | fiber = require('fiber') 42 | --- 43 | ... 44 | vshard.router.bootstrap() 45 | --- 46 | - true 47 | ... 48 | info = vshard.router.info() 49 | --- 50 | ...
51 | while info.bucket.unknown ~= 0 do vshard.router.discovery_wakeup() fiber.sleep(0.01) info = vshard.router.info() end 52 | --- 53 | ... 54 | _ = fiber.create(function() while true do vshard.router.discovery_wakeup() fiber.sleep(0.1) end end) 55 | --- 56 | ... 57 | _ = test_run:switch('storage_1_a') 58 | --- 59 | ... 60 | cfg.sharding[util.replicasets[1]].weight = 2 61 | --- 62 | ... 63 | vshard.storage.cfg(cfg, util.name_to_uuid.storage_1_a) 64 | --- 65 | ... 66 | _ = test_run:switch('storage_2_a') 67 | --- 68 | ... 69 | cfg.sharding[util.replicasets[1]].weight = 2 70 | --- 71 | ... 72 | vshard.storage.cfg(cfg, util.name_to_uuid.storage_2_a) 73 | --- 74 | ... 75 | _ = fiber.create(function() while true do vshard.storage.rebalancer_wakeup() fiber.sleep(0.1) end end) 76 | --- 77 | ... 78 | _ = test_run:switch('router_1') 79 | --- 80 | ... 81 | _ = test_run:cmd("setopt delimiter ';'") 82 | --- 83 | ... 84 | for i = 1, 10 do 85 | local b = vshard.router.info().bucket 86 | assert(b.unknown >= 0, "unknown >= 0") 87 | assert(b.available_rw >= 0, "b.available_rw >= 0") 88 | assert(b.unknown + b.available_rw == vshard.router.bucket_count()) 89 | fiber.sleep(0.1) 90 | end; 91 | --- 92 | ... 93 | _ = test_run:cmd("setopt delimiter ''"); 94 | --- 95 | ... 96 | _ = test_run:switch('default') 97 | --- 98 | ... 99 | _ = test_run:cmd("stop server router_1") 100 | --- 101 | ... 102 | _ = test_run:cmd("cleanup server router_1") 103 | --- 104 | ... 105 | test_run:drop_cluster(REPLICASET_1) 106 | --- 107 | ... 108 | test_run:drop_cluster(REPLICASET_2) 109 | --- 110 | ... 111 | -------------------------------------------------------------------------------- /test/router/router_and_rebalancing.test.lua: -------------------------------------------------------------------------------- 1 | test_run = require('test_run').new() 2 | REPLICASET_1 = { 'storage_1_a', 'storage_1_b' } 3 | REPLICASET_2 = { 'storage_2_a', 'storage_2_b' } 4 | 5 | test_run:create_cluster(REPLICASET_1, 'router') 6 | test_run:create_cluster(REPLICASET_2, 'router') 7 | util = require('util') 8 | util.wait_master(test_run, REPLICASET_1, 'storage_1_a') 9 | util.wait_master(test_run, REPLICASET_2, 'storage_2_a') 10 | _ = test_run:cmd("create server router_1 with script='router/router_1.lua'") 11 | _ = test_run:cmd("start server router_1") 12 | 13 | -- 14 | -- gh-97: unknown bucket_count in router.info() can be < 0 during 15 | -- rebalancing. 
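-- While buckets are being moved, the router can briefly see a bucket as
-- present on two replicasets or on none; the assertions below check that the
-- counters still never become negative.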
16 | -- 17 | _ = test_run:switch('router_1') 18 | util = require('util') 19 | fiber = require('fiber') 20 | 21 | vshard.router.bootstrap() 22 | info = vshard.router.info() 23 | while info.bucket.unknown ~= 0 do vshard.router.discovery_wakeup() fiber.sleep(0.01) info = vshard.router.info() end 24 | _ = fiber.create(function() while true do vshard.router.discovery_wakeup() fiber.sleep(0.1) end end) 25 | 26 | _ = test_run:switch('storage_1_a') 27 | cfg.sharding[util.replicasets[1]].weight = 2 28 | vshard.storage.cfg(cfg, util.name_to_uuid.storage_1_a) 29 | 30 | _ = test_run:switch('storage_2_a') 31 | cfg.sharding[util.replicasets[1]].weight = 2 32 | vshard.storage.cfg(cfg, util.name_to_uuid.storage_2_a) 33 | _ = fiber.create(function() while true do vshard.storage.rebalancer_wakeup() fiber.sleep(0.1) end end) 34 | 35 | _ = test_run:switch('router_1') 36 | _ = test_run:cmd("setopt delimiter ';'") 37 | for i = 1, 10 do 38 | local b = vshard.router.info().bucket 39 | assert(b.unknown >= 0, "unknown >= 0") 40 | assert(b.available_rw >= 0, "b.available_rw >= 0") 41 | assert(b.unknown + b.available_rw == vshard.router.bucket_count()) 42 | fiber.sleep(0.1) 43 | end; 44 | _ = test_run:cmd("setopt delimiter ''"); 45 | 46 | _ = test_run:switch('default') 47 | _ = test_run:cmd("stop server router_1") 48 | _ = test_run:cmd("cleanup server router_1") 49 | test_run:drop_cluster(REPLICASET_1) 50 | test_run:drop_cluster(REPLICASET_2) 51 | -------------------------------------------------------------------------------- /test/router/storage_1_a.lua: -------------------------------------------------------------------------------- 1 | ../lua_libs/storage_template.lua -------------------------------------------------------------------------------- /test/router/storage_1_b.lua: -------------------------------------------------------------------------------- 1 | ../lua_libs/storage_template.lua -------------------------------------------------------------------------------- /test/router/storage_2_a.lua: -------------------------------------------------------------------------------- 1 | ../lua_libs/storage_template.lua -------------------------------------------------------------------------------- /test/router/storage_2_b.lua: -------------------------------------------------------------------------------- 1 | ../lua_libs/storage_template.lua -------------------------------------------------------------------------------- /test/router/suite.ini: -------------------------------------------------------------------------------- 1 | [default] 2 | core = tarantool 3 | description = Router tests 4 | script = test.lua 5 | is_parallel = False 6 | lua_libs = ../lua_libs/util.lua ../lua_libs/git_util.lua 7 | ../lua_libs/storage_template.lua 8 | ../../example/localcfg.lua 9 | config.lua box_1_a.lua box_1_b.lua box_1_c.lua router_2.lua 10 | -------------------------------------------------------------------------------- /test/router/sync.result: -------------------------------------------------------------------------------- 1 | test_run = require('test_run').new() 2 | --- 3 | ... 4 | test_run:cmd("push filter 'line: *[0-9]+' to 'line: '") 5 | --- 6 | - true 7 | ... 8 | REPLICASET_1 = { 'storage_1_a', 'storage_1_b' } 9 | --- 10 | ... 11 | REPLICASET_2 = { 'storage_2_a', 'storage_2_b' } 12 | --- 13 | ... 14 | test_run:create_cluster(REPLICASET_1, 'router') 15 | --- 16 | ... 17 | test_run:create_cluster(REPLICASET_2, 'router') 18 | --- 19 | ... 20 | util = require('util') 21 | --- 22 | ... 
23 | util.wait_master(test_run, REPLICASET_1, 'storage_1_a') 24 | --- 25 | ... 26 | util.wait_master(test_run, REPLICASET_2, 'storage_2_a') 27 | --- 28 | ... 29 | util.map_evals(test_run, {REPLICASET_1, REPLICASET_2}, 'bootstrap_storage(\'memtx\')') 30 | --- 31 | ... 32 | _ = test_run:cmd("create server router_1 with script='router/router_1.lua'") 33 | --- 34 | ... 35 | _ = test_run:cmd("start server router_1") 36 | --- 37 | ... 38 | _ = test_run:switch("router_1") 39 | --- 40 | ... 41 | util = require('util') 42 | --- 43 | ... 44 | vshard.router.bootstrap() 45 | --- 46 | - true 47 | ... 48 | res, err = vshard.router.sync(-1) 49 | --- 50 | ... 51 | util.portable_error(err) 52 | --- 53 | - type: ClientError 54 | message: Timeout exceeded 55 | ... 56 | res, err = vshard.router.sync(0) 57 | --- 58 | ... 59 | util.is_timeout_error(err) 60 | --- 61 | - true 62 | ... 63 | -- 64 | -- gh-190: router should not ignore cfg.sync_timeout. 65 | -- 66 | test_run:cmd('stop server storage_1_b') 67 | --- 68 | - true 69 | ... 70 | test_run:switch('storage_1_a') 71 | --- 72 | - true 73 | ... 74 | cfg.sync_timeout = 0.01 75 | --- 76 | ... 77 | vshard.storage.cfg(cfg, box.info.uuid) 78 | --- 79 | ... 80 | test_run:switch('router_1') 81 | --- 82 | - true 83 | ... 84 | cfg.sync_timeout = 0.1 85 | --- 86 | ... 87 | vshard.router.cfg(cfg) 88 | --- 89 | ... 90 | start = fiber.time() 91 | --- 92 | ... 93 | ok, err = vshard.router.sync() 94 | --- 95 | ... 96 | ok, err ~= nil 97 | --- 98 | - null 99 | - true 100 | ... 101 | -- Storage 1a has no 1b replica available. Its sync would fail in 102 | -- ~0.01 seconds by timeout by default. But router should pass its 103 | -- own sync_timeout - 0.1. 104 | fiber.time() - start >= 0.1 105 | --- 106 | - true 107 | ... 108 | cfg.sync_timeout = nil 109 | --- 110 | ... 111 | vshard.router.cfg(cfg) 112 | --- 113 | ... 114 | test_run:switch('storage_1_a') 115 | --- 116 | - true 117 | ... 118 | cfg.sync_timeout = nil 119 | --- 120 | ... 121 | vshard.storage.cfg(cfg, box.info.uuid) 122 | --- 123 | ... 124 | test_run:switch('router_1') 125 | --- 126 | - true 127 | ... 128 | test_run:cmd('start server storage_1_b') 129 | --- 130 | - true 131 | ... 132 | test_run:cmd('stop server storage_1_a') 133 | --- 134 | - true 135 | ... 136 | ok, err = nil, nil 137 | --- 138 | ... 139 | -- Check that explicit timeout overwrites automatic ones. 140 | for i = 1, 10 do ok, err = vshard.router.sync(0.01) end 141 | --- 142 | ... 143 | ok, err ~= nil 144 | --- 145 | - null 146 | - true 147 | ... 148 | test_run:cmd('start server storage_1_a') 149 | --- 150 | - true 151 | ... 152 | _ = test_run:switch("default") 153 | --- 154 | ... 155 | _ = test_run:cmd("stop server router_1") 156 | --- 157 | ... 158 | _ = test_run:cmd("cleanup server router_1") 159 | --- 160 | ... 161 | test_run:drop_cluster(REPLICASET_1) 162 | --- 163 | ... 164 | test_run:drop_cluster(REPLICASET_2) 165 | --- 166 | ... 
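-- The timeout precedence shown above, in one sketch: an explicit argument to
-- vshard.router.sync() wins over the router's cfg.sync_timeout, which in
-- turn overrides the storages' own sync_timeout. Hypothetical usage,
-- assuming an already configured router:
local ok, err = vshard.router.sync(0.5)
if not ok then
    -- err is a timeout error when some storage could not sync its replicas
    -- within the 0.5-second budget.
    assert(err ~= nil)
end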
167 | -------------------------------------------------------------------------------- /test/router/sync.test.lua: -------------------------------------------------------------------------------- 1 | test_run = require('test_run').new() 2 | test_run:cmd("push filter 'line: *[0-9]+' to 'line: '") 3 | REPLICASET_1 = { 'storage_1_a', 'storage_1_b' } 4 | REPLICASET_2 = { 'storage_2_a', 'storage_2_b' } 5 | test_run:create_cluster(REPLICASET_1, 'router') 6 | test_run:create_cluster(REPLICASET_2, 'router') 7 | util = require('util') 8 | util.wait_master(test_run, REPLICASET_1, 'storage_1_a') 9 | util.wait_master(test_run, REPLICASET_2, 'storage_2_a') 10 | util.map_evals(test_run, {REPLICASET_1, REPLICASET_2}, 'bootstrap_storage(\'memtx\')') 11 | _ = test_run:cmd("create server router_1 with script='router/router_1.lua'") 12 | _ = test_run:cmd("start server router_1") 13 | _ = test_run:switch("router_1") 14 | util = require('util') 15 | 16 | vshard.router.bootstrap() 17 | 18 | res, err = vshard.router.sync(-1) 19 | util.portable_error(err) 20 | res, err = vshard.router.sync(0) 21 | util.is_timeout_error(err) 22 | 23 | -- 24 | -- gh-190: router should not ignore cfg.sync_timeout. 25 | -- 26 | test_run:cmd('stop server storage_1_b') 27 | test_run:switch('storage_1_a') 28 | cfg.sync_timeout = 0.01 29 | vshard.storage.cfg(cfg, box.info.uuid) 30 | 31 | test_run:switch('router_1') 32 | cfg.sync_timeout = 0.1 33 | vshard.router.cfg(cfg) 34 | start = fiber.time() 35 | ok, err = vshard.router.sync() 36 | ok, err ~= nil 37 | -- Storage 1a has no 1b replica available. Its sync would fail in 38 | -- ~0.01 seconds by timeout by default. But router should pass its 39 | -- own sync_timeout - 0.1. 40 | fiber.time() - start >= 0.1 41 | cfg.sync_timeout = nil 42 | vshard.router.cfg(cfg) 43 | 44 | test_run:switch('storage_1_a') 45 | cfg.sync_timeout = nil 46 | vshard.storage.cfg(cfg, box.info.uuid) 47 | 48 | test_run:switch('router_1') 49 | test_run:cmd('start server storage_1_b') 50 | 51 | test_run:cmd('stop server storage_1_a') 52 | ok, err = nil, nil 53 | -- Check that explicit timeout overwrites automatic ones. 54 | for i = 1, 10 do ok, err = vshard.router.sync(0.01) end 55 | ok, err ~= nil 56 | test_run:cmd('start server storage_1_a') 57 | 58 | _ = test_run:switch("default") 59 | _ = test_run:cmd("stop server router_1") 60 | _ = test_run:cmd("cleanup server router_1") 61 | test_run:drop_cluster(REPLICASET_1) 62 | test_run:drop_cluster(REPLICASET_2) 63 | -------------------------------------------------------------------------------- /test/router/test.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | 3 | require('strict').on() 4 | 5 | box.cfg{ 6 | listen = os.getenv("LISTEN"), 7 | } 8 | 9 | require('console').listen(os.getenv('ADMIN')) 10 | -------------------------------------------------------------------------------- /test/router/wrong_config.result: -------------------------------------------------------------------------------- 1 | test_run = require('test_run').new() 2 | --- 3 | ... 4 | REPLICASET_1 = { 'box_1_a', 'box_1_b', 'box_1_c' } 5 | --- 6 | ... 7 | test_run:create_cluster(REPLICASET_1, 'router') 8 | --- 9 | ... 10 | util = require('util') 11 | --- 12 | ... 13 | util.wait_master(test_run, REPLICASET_1, 'box_1_a') 14 | --- 15 | ... 16 | util.map_evals(test_run, {REPLICASET_1}, 'bootstrap_storage(\'memtx\')') 17 | --- 18 | ... 19 | _ = test_run:cmd("create server router_3 with script='router/router_3.lua'") 20 | --- 21 | ... 
22 | _ = test_run:cmd("start server router_3") 23 | --- 24 | ... 25 | _ = test_run:switch("router_3") 26 | --- 27 | ... 28 | cfg.bucket_count = 3000 29 | --- 30 | ... 31 | vshard.router.cfg(cfg) 32 | --- 33 | ... 34 | vshard.router.bootstrap() 35 | --- 36 | - true 37 | ... 38 | -- 39 | -- gh-179: negative router.info bucket count, when it is 40 | -- configured improperly. 41 | -- 42 | cfg.bucket_count = 1000 43 | --- 44 | ... 45 | r = vshard.router.new('gh-179', cfg) 46 | --- 47 | ... 48 | while r:info().bucket.available_rw ~= 3000 do \ 49 | r:discovery_wakeup() \ 50 | fiber.sleep(0.1) \ 51 | end 52 | --- 53 | ... 54 | i = r:info() 55 | --- 56 | ... 57 | i.bucket 58 | --- 59 | - unreachable: 0 60 | available_ro: 0 61 | unknown: ??? 62 | available_rw: 3000 63 | ... 64 | i.alerts 65 | --- 66 | - - ['INVALID_CFG', 'Invalid configuration: probably router''s cfg.bucket_count is 67 | different from storages'' one, difference is 2000'] 68 | ... 69 | _ = test_run:switch("default") 70 | --- 71 | ... 72 | _ = test_run:cmd("stop server router_3") 73 | --- 74 | ... 75 | _ = test_run:cmd("cleanup server router_3") 76 | --- 77 | ... 78 | test_run:drop_cluster(REPLICASET_1) 79 | --- 80 | ... 81 | _ = test_run:cmd('clear filter') 82 | --- 83 | ... 84 | -------------------------------------------------------------------------------- /test/router/wrong_config.test.lua: -------------------------------------------------------------------------------- 1 | test_run = require('test_run').new() 2 | REPLICASET_1 = { 'box_1_a', 'box_1_b', 'box_1_c' } 3 | test_run:create_cluster(REPLICASET_1, 'router') 4 | util = require('util') 5 | util.wait_master(test_run, REPLICASET_1, 'box_1_a') 6 | util.map_evals(test_run, {REPLICASET_1}, 'bootstrap_storage(\'memtx\')') 7 | _ = test_run:cmd("create server router_3 with script='router/router_3.lua'") 8 | _ = test_run:cmd("start server router_3") 9 | _ = test_run:switch("router_3") 10 | 11 | cfg.bucket_count = 3000 12 | vshard.router.cfg(cfg) 13 | vshard.router.bootstrap() 14 | 15 | -- 16 | -- gh-179: negative router.info bucket count, when it is 17 | -- configured improperly. 
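-- Concretely: the router below is configured with bucket_count = 1000 while
-- the storages hold 3000 buckets, so a naive 'unknown' counter would go to
-- -2000; instead the mismatch is reported via the INVALID_CFG alert.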
18 | -- 19 | cfg.bucket_count = 1000 20 | r = vshard.router.new('gh-179', cfg) 21 | while r:info().bucket.available_rw ~= 3000 do \ 22 | r:discovery_wakeup() \ 23 | fiber.sleep(0.1) \ 24 | end 25 | i = r:info() 26 | i.bucket 27 | i.alerts 28 | 29 | _ = test_run:switch("default") 30 | _ = test_run:cmd("stop server router_3") 31 | _ = test_run:cmd("cleanup server router_3") 32 | test_run:drop_cluster(REPLICASET_1) 33 | _ = test_run:cmd('clear filter') 34 | -------------------------------------------------------------------------------- /test/storage-luatest/auto_master_2_test.lua: -------------------------------------------------------------------------------- 1 | local t = require('luatest') 2 | local vtest = require('test.luatest_helpers.vtest') 3 | 4 | local test_group = t.group('storage') 5 | 6 | local cfg_template = { 7 | sharding = { 8 | { 9 | master = 'auto', 10 | replicas = { 11 | replica_1_a = { 12 | read_only = false, 13 | }, 14 | replica_1_b = { 15 | read_only = true, 16 | }, 17 | }, 18 | }, 19 | }, 20 | bucket_count = 20, 21 | replication_timeout = 0.1, 22 | } 23 | local global_cfg 24 | 25 | test_group.before_all(function(g) 26 | global_cfg = vtest.config_new(cfg_template) 27 | 28 | vtest.cluster_new(g, global_cfg) 29 | vtest.cluster_bootstrap(g, global_cfg) 30 | vtest.cluster_wait_vclock_all(g) 31 | vtest.cluster_rebalancer_disable(g) 32 | end) 33 | 34 | test_group.after_all(function(g) 35 | g.cluster:drop() 36 | end) 37 | 38 | test_group.test_bootstrap = function(g) 39 | g.replica_1_a:exec(function() 40 | ilt.assert(ivshard.storage.internal.is_master) 41 | ilt.assert(ivshard.storage.internal.this_replicaset.is_master_auto) 42 | end) 43 | g.replica_1_b:exec(function() 44 | ilt.assert(not ivshard.storage.internal.is_master) 45 | ilt.assert(ivshard.storage.internal.this_replicaset.is_master_auto) 46 | end) 47 | end 48 | 49 | test_group.test_change = function(g) 50 | g.replica_1_a:exec(function() 51 | box.cfg{read_only = true} 52 | ifiber.yield() 53 | ilt.assert(not ivshard.storage.internal.is_master) 54 | ilt.assert(ivshard.storage.internal.this_replicaset.is_master_auto) 55 | end) 56 | g.replica_1_b:exec(function() 57 | box.cfg{read_only = false} 58 | ifiber.yield() 59 | ilt.assert(ivshard.storage.internal.is_master) 60 | ilt.assert(ivshard.storage.internal.this_replicaset.is_master_auto) 61 | end) 62 | end 63 | 64 | test_group.test_turn_off_and_on = function(g) 65 | local new_cfg_template = table.deepcopy(cfg_template) 66 | local rs_cfg = new_cfg_template.sharding[1] 67 | rs_cfg.master = nil 68 | rs_cfg.replicas.replica_1_a.read_only = nil 69 | rs_cfg.replicas.replica_1_b.read_only = nil 70 | local new_global_cfg = vtest.config_new(new_cfg_template) 71 | vtest.cluster_cfg(g, new_global_cfg) 72 | 73 | local function check_master_is_fully_off() 74 | ilt.assert(not ivshard.storage.internal.is_master) 75 | ilt.assert(not ivshard.storage.internal.this_replicaset.is_master_auto) 76 | box.cfg{read_only = false} 77 | ilt.assert(not box.info.ro) 78 | ilt.assert(not ivshard.storage.internal.is_master) 79 | box.cfg{read_only = true} 80 | end 81 | g.replica_1_a:exec(check_master_is_fully_off) 82 | g.replica_1_b:exec(check_master_is_fully_off) 83 | 84 | rs_cfg.replicas.replica_1_a.master = true 85 | new_global_cfg = vtest.config_new(new_cfg_template) 86 | vtest.cluster_cfg(g, new_global_cfg) 87 | 88 | g.replica_1_a:exec(function() 89 | ilt.assert(ivshard.storage.internal.is_master) 90 | ilt.assert(not ivshard.storage.internal.this_replicaset.is_master_auto) 91 | box.cfg{read_only = true} 92 
| ilt.assert(box.info.ro) 93 | ilt.assert(ivshard.storage.internal.is_master) 94 | box.cfg{read_only = false} 95 | end) 96 | g.replica_1_b:exec(check_master_is_fully_off) 97 | 98 | vtest.cluster_cfg(g, global_cfg) 99 | g.replica_1_a:exec(function() 100 | ilt.assert(ivshard.storage.internal.is_master) 101 | ilt.assert(ivshard.storage.internal.this_replicaset.is_master_auto) 102 | end) 103 | g.replica_1_b:exec(function() 104 | ilt.assert(not ivshard.storage.internal.is_master) 105 | ilt.assert(ivshard.storage.internal.this_replicaset.is_master_auto) 106 | end) 107 | end 108 | -------------------------------------------------------------------------------- /test/storage-luatest/box_cfg_mode_test.lua: -------------------------------------------------------------------------------- 1 | local t = require('luatest') 2 | local vtest = require('test.luatest_helpers.vtest') 3 | 4 | local test_group = t.group() 5 | 6 | local cfg_template = { 7 | sharding = { 8 | { 9 | replicas = { 10 | replica_1_a = {master = true}, 11 | }, 12 | }, 13 | { 14 | replicas = { 15 | replica_2_a = {master = true}, 16 | }, 17 | }, 18 | }, 19 | bucket_count = 20, 20 | box_cfg_mode = 'manual', 21 | replication_timeout = 0.1, 22 | } 23 | 24 | local global_cfg 25 | 26 | test_group.before_all(function(g) 27 | global_cfg = vtest.config_new(cfg_template) 28 | vtest.cluster_new(g, global_cfg) 29 | vtest.cluster_bootstrap(g, global_cfg) 30 | vtest.cluster_wait_vclock_all(g) 31 | vtest.cluster_rebalancer_disable(g) 32 | end) 33 | 34 | test_group.after_all(function(g) 35 | g.cluster:drop() 36 | end) 37 | 38 | test_group.test_storage_disabled_error = function(g) 39 | g.replica_1_a:exec(function(cfg) 40 | local old_box = box.cfg 41 | box.cfg = function() end 42 | ilt.assert_error_msg_contains('Box must be configured', function() 43 | ivshard.storage.cfg(cfg, box.info.uuid) 44 | end) 45 | box.cfg = old_box 46 | end, {global_cfg}) 47 | end 48 | 49 | -- 50 | -- vtest.cluster_new does exactly what a user should do 51 | -- when using 'manual' box_cfg_mode: call box.cfg prior 52 | -- to executing vshard.storage.cfg. So, just some basic test. 53 | -- 54 | test_group.test_storage_basic = function(g) 55 | -- noreset in order not to depend on reconfiguration in previous tests. 56 | t.assert(g.replica_1_a:grep_log('Box configuration was skipped', 57 | 65536, {noreset = true})) 58 | local bid = g.replica_1_a:exec(function(uuid) 59 | local bid = _G.get_first_bucket() 60 | local ok, err = ivshard.storage.bucket_send(bid, uuid) 61 | ilt.assert_equals(err, nil) 62 | ilt.assert(ok) 63 | _G.bucket_gc_wait() 64 | return bid 65 | end, {g.replica_2_a:replicaset_uuid()}) 66 | -- Restore balance.
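-- (The bucket moved to replicaset 2 above is sent back below, so later tests
-- start from the bootstrap distribution.)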
67 | g.replica_2_a:exec(function(bid, uuid) 68 | local ok, err = ivshard.storage.bucket_send(bid, uuid) 69 | ilt.assert_equals(err, nil) 70 | ilt.assert(ok) 71 | _G.bucket_gc_wait() 72 | end, {bid, g.replica_1_a:replicaset_uuid()}) 73 | end 74 | -------------------------------------------------------------------------------- /test/storage-luatest/persistent_names_2_test.lua: -------------------------------------------------------------------------------- 1 | local t = require('luatest') 2 | local vtest = require('test.luatest_helpers.vtest') 3 | local vutil = require('vshard.util') 4 | 5 | local test_group = t.group('storage') 6 | 7 | local cfg_template = { 8 | sharding = { 9 | replicaset_1 = { 10 | replicas = { 11 | replica_1_a = { 12 | master = true 13 | }, 14 | replica_1_b = {}, 15 | }, 16 | }, 17 | }, 18 | bucket_count = 20, 19 | identification_mode = 'name_as_key' 20 | } 21 | 22 | local global_cfg 23 | 24 | test_group.before_all(function(g) 25 | t.run_only_if(vutil.feature.persistent_names) 26 | global_cfg = vtest.config_new(cfg_template) 27 | 28 | vtest.cluster_new(g, global_cfg) 29 | vtest.cluster_bootstrap(g, global_cfg) 30 | vtest.cluster_wait_vclock_all(g) 31 | vtest.cluster_rebalancer_disable(g) 32 | end) 33 | 34 | test_group.after_all(function(g) 35 | g.cluster:drop() 36 | end) 37 | 38 | test_group.test_named_replicaset_alerts_when_replica_disconnects = function(g) 39 | g.replica_1_b:stop() 40 | local alerts = g.replica_1_a:exec(function() 41 | return ivshard.storage.info().alerts 42 | end) 43 | vtest.info_assert_alert(alerts, 'UNREACHABLE_REPLICA') 44 | vtest.info_assert_alert(alerts, 'UNREACHABLE_REPLICASET') 45 | g.replica_1_b:start() 46 | end 47 | -------------------------------------------------------------------------------- /test/storage-luatest/schema_management_mode_test.lua: -------------------------------------------------------------------------------- 1 | local t = require('luatest') 2 | local vtest = require('test.luatest_helpers.vtest') 3 | local vutil = require('vshard.util') 4 | 5 | local group_config = {{mode = 'auto'}, {mode = 'manual_access'}} 6 | 7 | if vutil.feature.memtx_mvcc then 8 | table.insert(group_config, { 9 | mode = 'auto', memtx_use_mvcc_engine = true 10 | }) 11 | table.insert(group_config, { 12 | mode = 'manual_access', memtx_use_mvcc_engine = true 13 | }) 14 | end 15 | 16 | local test_group = t.group('storage_schema_management_mode', group_config) 17 | 18 | local cfg_template = { 19 | sharding = { 20 | { 21 | master = 'auto', 22 | replicas = { 23 | replica_1_a = {read_only = false}, 24 | replica_1_b = {read_only = true}, 25 | }, 26 | }, 27 | { 28 | master = 'auto', 29 | replicas = { 30 | replica_2_a = {read_only = false}, 31 | }, 32 | }, 33 | }, 34 | bucket_count = 10, 35 | } 36 | 37 | test_group.before_all(function(g) 38 | cfg_template.memtx_use_mvcc_engine = g.params.memtx_use_mvcc_engine 39 | cfg_template.schema_management_mode = g.params.mode 40 | local cfg = vtest.config_new(cfg_template) 41 | 42 | vtest.cluster_new(g, cfg) 43 | vtest.cluster_bootstrap(g, cfg) 44 | vtest.cluster_rebalancer_disable(g) 45 | end) 46 | 47 | test_group.after_all(function(g) 48 | g.cluster:drop() 49 | g.cluster = nil 50 | end) 51 | 52 | test_group.test_boot_with_mode_manual_access = function(g) 53 | local bid = g.replica_1_a:exec(function(uuid) 54 | local bid = _G.get_first_bucket() 55 | local ok, err = ivshard.storage.bucket_send(bid, uuid, 56 | {timeout = iwait_timeout}) 57 | ilt.assert_equals(err, nil) 58 | ilt.assert(ok) 59 | _G.bucket_gc_wait() 60 | return
bid 61 | end, {g.replica_2_a:replicaset_uuid()}) 62 | -- 63 | -- Switch to another mode. Still works fine. 64 | -- 65 | local test_template = table.deepcopy(cfg_template) 66 | if test_template.schema_management_mode == 'auto' then 67 | test_template.schema_management_mode = 'manual_access' 68 | else 69 | test_template.schema_management_mode = 'auto' 70 | end 71 | vtest.cluster_cfg(g, vtest.config_new(test_template)) 72 | g.replica_2_a:exec(function(bid, uuid) 73 | local ok, err = ivshard.storage.bucket_send(bid, uuid, 74 | {timeout = iwait_timeout}) 75 | ilt.assert_equals(err, nil) 76 | ilt.assert(ok) 77 | _G.bucket_gc_wait() 78 | end, {bid, g.replica_1_a:replicaset_uuid()}) 79 | end 80 | -------------------------------------------------------------------------------- /test/storage-luatest/storage_1_1_1_test.lua: -------------------------------------------------------------------------------- 1 | local t = require('luatest') 2 | local vtest = require('test.luatest_helpers.vtest') 3 | local vutil = require('vshard.util') 4 | 5 | local group_config = {{engine = 'memtx'}, {engine = 'vinyl'}} 6 | 7 | if vutil.feature.memtx_mvcc then 8 | table.insert(group_config, { 9 | engine = 'memtx', memtx_use_mvcc_engine = true 10 | }) 11 | table.insert(group_config, { 12 | engine = 'vinyl', memtx_use_mvcc_engine = true 13 | }) 14 | end 15 | 16 | local test_group = t.group('storage', group_config) 17 | 18 | local cfg_template = { 19 | sharding = { 20 | { 21 | replicas = { 22 | replica_1_a = { 23 | master = true, 24 | }, 25 | }, 26 | }, 27 | { 28 | replicas = { 29 | replica_2_a = { 30 | master = true, 31 | }, 32 | }, 33 | }, 34 | { 35 | replicas = { 36 | replica_3_a = { 37 | master = true, 38 | }, 39 | }, 40 | }, 41 | }, 42 | bucket_count = 15, 43 | replication_timeout = 0.1, 44 | } 45 | local global_cfg 46 | 47 | test_group.before_all(function(g) 48 | cfg_template.memtx_use_mvcc_engine = g.params.memtx_use_mvcc_engine 49 | global_cfg = vtest.config_new(cfg_template) 50 | 51 | vtest.cluster_new(g, global_cfg) 52 | vtest.cluster_bootstrap(g, global_cfg) 53 | vtest.cluster_rebalancer_disable(g) 54 | end) 55 | 56 | test_group.after_all(function(g) 57 | g.cluster:drop() 58 | end) 59 | 60 | -- 61 | -- Test that manual vshard.storage.bucket_send() cannot lead to 62 | -- doubled buckets (gh-414). 
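-- The scenario: a send from replicaset 1 hangs on the receiving side
-- (ERRINJ_LONG_RECEIVE) and times out for the sender; with recovery paused,
-- the bucket temporarily exists on both ends, and the test checks that a
-- second manual send does not double it.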
63 | -- 64 | test_group.test_manual_bucket_send_doubled_buckets = function(g) 65 | vtest.cluster_exec_each_master(g, function() 66 | _G.bucket_recovery_pause() 67 | end) 68 | 69 | local uuid_2 = g.replica_2_a:exec(function() 70 | ivshard.storage.internal.errinj.ERRINJ_LONG_RECEIVE = true 71 | return ivutil.replicaset_uuid() 72 | end) 73 | 74 | local bid = g.replica_1_a:exec(function(uuid) 75 | local bid = _G.get_first_bucket() 76 | local ok, err = ivshard.storage.bucket_send(bid, uuid) 77 | ilt.assert(iverror.is_timeout(err)) 78 | ilt.assert_not(ok, 'bucket_send not ok') 79 | return bid 80 | end, {uuid_2}) 81 | 82 | g.replica_2_a:exec(function(bid, uuid) 83 | ivshard.storage.internal.errinj.ERRINJ_LONG_RECEIVE = false 84 | ilt.assert_equals(box.space._bucket:get(bid).status, 85 | ivconst.BUCKET.ACTIVE) 86 | local ok, err = ivshard.storage.bucket_send(bid, uuid) 87 | ilt.assert_equals(err, nil, 'bucket_send no error') 88 | ilt.assert(ok, 'bucket_send ok') 89 | _G.bucket_recovery_continue() 90 | end, {bid, g.replica_3_a:replicaset_uuid()}) 91 | 92 | g.replica_3_a:exec(function(bid) 93 | ilt.assert_equals(box.space._bucket:get(bid).status, 94 | ivconst.BUCKET.ACTIVE) 95 | end, {bid}) 96 | 97 | g.replica_1_a:exec(function(bid) 98 | _G.bucket_recovery_continue() 99 | _G.bucket_recovery_wait() 100 | _G.bucket_gc_wait() 101 | ilt.assert_equals(box.space._bucket:get(bid), nil) 102 | end, {bid}) 103 | end 104 | -------------------------------------------------------------------------------- /test/storage-luatest/suite.ini: -------------------------------------------------------------------------------- 1 | [default] 2 | core = luatest 3 | description = Storage tests 4 | is_parallel = True 5 | release_disabled = 6 | -------------------------------------------------------------------------------- /test/storage/cfg_after_box.result: -------------------------------------------------------------------------------- 1 | test_run = require('test_run').new() 2 | --- 3 | ... 4 | -- 5 | -- gh-110: Tarantool has a bug: when box.cfg{} is called with 6 | -- no arguments, then 7 | -- box.cfg{instance/replicaset_uuid = box.info.uuid/cluster.uuid} 8 | -- does not work. 9 | -- Vshard must not include these uuids in the config when it is 10 | -- called more than once. 11 | -- 12 | test_run:cmd("create server storage_1_1 with script='storage/storage_1_1.lua'") 13 | --- 14 | - true 15 | ... 16 | test_run:cmd("start server storage_1_1") 17 | --- 18 | - true 19 | ... 20 | test_run:switch('storage_1_1') 21 | --- 22 | - true 23 | ... 24 | vshard.storage.cfg(cfg, instance_uuid) 25 | --- 26 | ... 27 | test_run:switch('default') 28 | --- 29 | - true 30 | ... 31 | test_run:cmd("stop server storage_1_1") 32 | --- 33 | - true 34 | ... 35 | test_run:cmd("cleanup server storage_1_1") 36 | --- 37 | - true 38 | ... 39 | test_run:cmd("create server storage_1_2 with script='storage/storage_1_2.lua'") 40 | --- 41 | - true 42 | ... 43 | test_run:cmd("start server storage_1_2") 44 | --- 45 | - true 46 | ... 47 | test_run:switch('storage_1_2') 48 | --- 49 | - true 50 | ... 51 | util.check_error(vshard.storage.cfg, cfg, instance_uuid) 52 | --- 53 | - 'Instance UUID mismatch: already set "8a274925-a26d-47fc-9e1b-af88ce939412" but 54 | "8a274925-a26d-47fc-9e1b-af88ce000000" in arguments' 55 | ... 56 | test_run:switch('default') 57 | --- 58 | - true 59 | ... 60 | test_run:cmd("stop server storage_1_2") 61 | --- 62 | - true 63 | ... 64 | test_run:cmd("cleanup server storage_1_2") 65 | --- 66 | - true 67 | ...
68 | test_run:cmd("create server storage_1_3 with script='storage/storage_1_3.lua'") 69 | --- 70 | - true 71 | ... 72 | test_run:cmd("start server storage_1_3") 73 | --- 74 | - true 75 | ... 76 | test_run:switch('storage_1_3') 77 | --- 78 | - true 79 | ... 80 | util.check_error(vshard.storage.cfg, cfg, instance_uuid) 81 | --- 82 | - 'Replicaset UUID mismatch: already set "8a274925-a26d-47fc-9e1b-af88ce939412" but 83 | "8a274925-a26d-47fc-9e1b-af88ce000000" in vshard config' 84 | ... 85 | test_run:switch('default') 86 | --- 87 | - true 88 | ... 89 | test_run:cmd("stop server storage_1_3") 90 | --- 91 | - true 92 | ... 93 | test_run:cmd("cleanup server storage_1_3") 94 | --- 95 | - true 96 | ... 97 | -------------------------------------------------------------------------------- /test/storage/cfg_after_box.test.lua: -------------------------------------------------------------------------------- 1 | test_run = require('test_run').new() 2 | 3 | -- 4 | -- gh-110: Tarantool has a bug: when box.cfg{} is called with 5 | -- no arguments, then 6 | -- box.cfg{instance/replicaset_uuid = box.info.uuid/cluster.uuid} 7 | -- does not work. 8 | -- Vshard must not include these uuids in the config when it is 9 | -- called more than once. 10 | -- 11 | test_run:cmd("create server storage_1_1 with script='storage/storage_1_1.lua'") 12 | test_run:cmd("start server storage_1_1") 13 | test_run:switch('storage_1_1') 14 | vshard.storage.cfg(cfg, instance_uuid) 15 | test_run:switch('default') 16 | test_run:cmd("stop server storage_1_1") 17 | test_run:cmd("cleanup server storage_1_1") 18 | 19 | test_run:cmd("create server storage_1_2 with script='storage/storage_1_2.lua'") 20 | test_run:cmd("start server storage_1_2") 21 | test_run:switch('storage_1_2') 22 | util.check_error(vshard.storage.cfg, cfg, instance_uuid) 23 | test_run:switch('default') 24 | test_run:cmd("stop server storage_1_2") 25 | test_run:cmd("cleanup server storage_1_2") 26 | 27 | test_run:cmd("create server storage_1_3 with script='storage/storage_1_3.lua'") 28 | test_run:cmd("start server storage_1_3") 29 | test_run:switch('storage_1_3') 30 | util.check_error(vshard.storage.cfg, cfg, instance_uuid) 31 | test_run:switch('default') 32 | test_run:cmd("stop server storage_1_3") 33 | test_run:cmd("cleanup server storage_1_3") 34 | -------------------------------------------------------------------------------- /test/storage/demote_sync_errinj.result: -------------------------------------------------------------------------------- 1 | test_run = require('test_run').new() 2 | --- 3 | ... 4 | REPLICASET_1 = { 'storage_1_a', 'storage_1_b' } 5 | --- 6 | ... 7 | REPLICASET_2 = { 'storage_2_a', 'storage_2_b' } 8 | --- 9 | ... 10 | test_run:create_cluster(REPLICASET_1, 'storage') 11 | --- 12 | ... 13 | test_run:create_cluster(REPLICASET_2, 'storage') 14 | --- 15 | ... 16 | util = require('util') 17 | --- 18 | ... 19 | util.wait_master(test_run, REPLICASET_1, 'storage_1_a') 20 | --- 21 | ... 22 | util.wait_master(test_run, REPLICASET_2, 'storage_2_a') 23 | --- 24 | ... 25 | test_run:switch('storage_1_a') 26 | --- 27 | - true 28 | ... 29 | fiber = require('fiber') 30 | --- 31 | ... 32 | s = box.schema.create_space('test') 33 | --- 34 | ... 35 | pk = s:create_index('pk') 36 | --- 37 | ... 38 | vshard.storage.internal.errinj.ERRINJ_CFG_DELAY = true 39 | --- 40 | ... 41 | cfg.sharding[util.replicasets[1]].replicas[util.name_to_uuid.storage_1_b].master = true 42 | --- 43 | ...
44 | cfg.sharding[util.replicasets[1]].replicas[util.name_to_uuid.storage_1_a].master = false 45 | --- 46 | ... 47 | f = fiber.create(function() vshard.storage.cfg(cfg, util.name_to_uuid.storage_1_a) end) 48 | --- 49 | ... 50 | f:status() 51 | --- 52 | - suspended 53 | ... 54 | -- Cannot write - read-only mode is already on. 55 | ok, err = pcall(s.replace, s, {1}) 56 | --- 57 | ... 58 | assert(not ok and err.code == box.error.READONLY) 59 | --- 60 | - true 61 | ... 62 | test_run:switch('storage_1_b') 63 | --- 64 | - true 65 | ... 66 | cfg.sharding[util.replicasets[1]].replicas[util.name_to_uuid.storage_1_b].master = true 67 | --- 68 | ... 69 | cfg.sharding[util.replicasets[1]].replicas[util.name_to_uuid.storage_1_a].master = false 70 | --- 71 | ... 72 | vshard.storage.cfg(cfg, util.name_to_uuid.storage_1_b) 73 | --- 74 | ... 75 | box.space.test:select{} 76 | --- 77 | - [] 78 | ... 79 | test_run:switch('storage_1_a') 80 | --- 81 | - true 82 | ... 83 | vshard.storage.internal.errinj.ERRINJ_CFG_DELAY = false 84 | --- 85 | ... 86 | while f:status() ~= 'dead' do fiber.sleep(0.1) end 87 | --- 88 | ... 89 | s:select{} 90 | --- 91 | - [] 92 | ... 93 | test_run:switch('storage_1_b') 94 | --- 95 | - true 96 | ... 97 | box.space.test:select{} 98 | --- 99 | - [] 100 | ... 101 | box.space.test:drop() 102 | --- 103 | ... 104 | test_run:cmd("switch default") 105 | --- 106 | - true 107 | ... 108 | test_run:drop_cluster(REPLICASET_2) 109 | --- 110 | ... 111 | test_run:drop_cluster(REPLICASET_1) 112 | --- 113 | ... 114 | -------------------------------------------------------------------------------- /test/storage/demote_sync_errinj.test.lua: -------------------------------------------------------------------------------- 1 | test_run = require('test_run').new() 2 | REPLICASET_1 = { 'storage_1_a', 'storage_1_b' } 3 | REPLICASET_2 = { 'storage_2_a', 'storage_2_b' } 4 | test_run:create_cluster(REPLICASET_1, 'storage') 5 | test_run:create_cluster(REPLICASET_2, 'storage') 6 | util = require('util') 7 | util.wait_master(test_run, REPLICASET_1, 'storage_1_a') 8 | util.wait_master(test_run, REPLICASET_2, 'storage_2_a') 9 | 10 | test_run:switch('storage_1_a') 11 | fiber = require('fiber') 12 | s = box.schema.create_space('test') 13 | pk = s:create_index('pk') 14 | vshard.storage.internal.errinj.ERRINJ_CFG_DELAY = true 15 | cfg.sharding[util.replicasets[1]].replicas[util.name_to_uuid.storage_1_b].master = true 16 | cfg.sharding[util.replicasets[1]].replicas[util.name_to_uuid.storage_1_a].master = false 17 | f = fiber.create(function() vshard.storage.cfg(cfg, util.name_to_uuid.storage_1_a) end) 18 | f:status() 19 | -- Cannot write - read-only mode is already on.
20 | ok, err = pcall(s.replace, s, {1}) 21 | assert(not ok and err.code == box.error.READONLY) 22 | 23 | test_run:switch('storage_1_b') 24 | cfg.sharding[util.replicasets[1]].replicas[util.name_to_uuid.storage_1_b].master = true 25 | cfg.sharding[util.replicasets[1]].replicas[util.name_to_uuid.storage_1_a].master = false 26 | vshard.storage.cfg(cfg, util.name_to_uuid.storage_1_b) 27 | box.space.test:select{} 28 | 29 | test_run:switch('storage_1_a') 30 | vshard.storage.internal.errinj.ERRINJ_CFG_DELAY = false 31 | while f:status() ~= 'dead' do fiber.sleep(0.1) end 32 | s:select{} 33 | 34 | test_run:switch('storage_1_b') 35 | box.space.test:select{} 36 | box.space.test:drop() 37 | 38 | test_run:cmd("switch default") 39 | test_run:drop_cluster(REPLICASET_2) 40 | test_run:drop_cluster(REPLICASET_1) 41 | -------------------------------------------------------------------------------- /test/storage/engine.cfg: -------------------------------------------------------------------------------- 1 | { 2 | "storage.test.lua": { 3 | "memtx": {"engine": "memtx"}, 4 | "vinyl": {"engine": "vinyl"} 5 | }, 6 | "garbage_collector.test.lua": { 7 | "memtx": {"engine": "memtx"}, 8 | "vinyl": {"engine": "vinyl"} 9 | }, 10 | "demote_sync_errinj.test.lua": { 11 | "memtx": {"engine": "memtx"}, 12 | "vinyl": {"engine": "vinyl"} 13 | }, 14 | "read_only_slave.test.lua": { 15 | "memtx": {"engine": "memtx"}, 16 | "vinyl": {"engine": "vinyl"} 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /test/storage/read_only_slave.result: -------------------------------------------------------------------------------- 1 | test_run = require('test_run').new() 2 | --- 3 | ... 4 | REPLICASET_1 = { 'storage_1_a', 'storage_1_b' } 5 | --- 6 | ... 7 | REPLICASET_2 = { 'storage_2_a', 'storage_2_b' } 8 | --- 9 | ... 10 | test_run:create_cluster(REPLICASET_1, 'storage') 11 | --- 12 | ... 13 | test_run:create_cluster(REPLICASET_2, 'storage') 14 | --- 15 | ... 16 | util = require('util') 17 | --- 18 | ... 19 | util.wait_master(test_run, REPLICASET_1, 'storage_1_a') 20 | --- 21 | ... 22 | util.wait_master(test_run, REPLICASET_2, 'storage_2_a') 23 | --- 24 | ... 25 | util.map_evals(test_run, {REPLICASET_1, REPLICASET_2}, 'bootstrap_storage(\'memtx\')') 26 | --- 27 | ... 28 | _ = test_run:switch('storage_2_a') 29 | --- 30 | ... 31 | vshard.storage.rebalancer_disable() 32 | --- 33 | ... 34 | _ = test_run:switch('storage_1_a') 35 | --- 36 | ... 37 | box.cfg.read_only 38 | --- 39 | - false 40 | ... 41 | ok = nil 42 | --- 43 | ... 44 | err = nil 45 | --- 46 | ... 47 | function on_master_enable() box.space.test:replace{1, 1} end 48 | --- 49 | ... 50 | -- Test that writing is already impossible inside the disable trigger. 51 | function on_master_disable() ok, err = pcall(box.space.test.replace, box.space.test, {2, 2}) end 52 | --- 53 | ... 54 | _ = vshard.storage.on_master_enable(on_master_enable) 55 | --- 56 | ... 57 | _ = vshard.storage.on_master_disable(on_master_disable) 58 | --- 59 | ... 60 | box.space.test:select{} 61 | --- 62 | - - [1, 1] 63 | ... 64 | _ = test_run:switch('storage_1_b') 65 | --- 66 | ... 67 | box.cfg.read_only 68 | --- 69 | - true 70 | ... 71 | ok, err = pcall(box.schema.create_space, 'test3') 72 | --- 73 | ... 74 | assert(not ok and err.code == box.error.READONLY) 75 | --- 76 | - true 77 | ... 78 | fiber = require('fiber') 79 | --- 80 | ... 81 | function on_master_enable() box.space.test:replace{3, 3} end 82 | --- 83 | ...
84 | function on_master_disable() if not box.cfg.read_only then box.space.test:replace{4, 4} end end 85 | --- 86 | ... 87 | _ = vshard.storage.on_master_enable(on_master_enable) 88 | --- 89 | ... 90 | _ = vshard.storage.on_master_disable(on_master_disable) 91 | --- 92 | ... 93 | -- Yes, there is no 3 or 4, because the on-disable trigger always 94 | -- runs in read-only mode. 95 | box.space.test:select{} 96 | --- 97 | - - [1, 1] 98 | ... 99 | -- Check that after a master change read_only is updated, and 100 | -- that triggers on a master role switch can change spaces. 101 | cfg.sharding[util.replicasets[1]].replicas[util.name_to_uuid.storage_1_b].master = true 102 | --- 103 | ... 104 | cfg.sharding[util.replicasets[1]].replicas[util.name_to_uuid.storage_1_a].master = false 105 | --- 106 | ... 107 | vshard.storage.cfg(cfg, util.name_to_uuid.storage_1_b) 108 | --- 109 | ... 110 | box.cfg.read_only 111 | --- 112 | - false 113 | ... 114 | box.space.test:select{} 115 | --- 116 | - - [1, 1] 117 | - [3, 3] 118 | ... 119 | _ = test_run:switch('storage_1_a') 120 | --- 121 | ... 122 | cfg.sharding[util.replicasets[1]].replicas[util.name_to_uuid.storage_1_b].master = true 123 | --- 124 | ... 125 | cfg.sharding[util.replicasets[1]].replicas[util.name_to_uuid.storage_1_a].master = false 126 | --- 127 | ... 128 | vshard.storage.cfg(cfg, util.name_to_uuid.storage_1_a) 129 | --- 130 | ... 131 | box.cfg.read_only 132 | --- 133 | - true 134 | ... 135 | assert(not ok and err.code == box.error.READONLY) 136 | --- 137 | - true 138 | ... 139 | fiber = require('fiber') 140 | --- 141 | ... 142 | while box.space.test:count() ~= 2 do fiber.sleep(0.1) end 143 | --- 144 | ... 145 | box.space.test:select{} 146 | --- 147 | - - [1, 1] 148 | - [3, 3] 149 | ... 150 | _ = test_run:switch('storage_1_b') 151 | --- 152 | ... 153 | box.space.test:drop() 154 | --- 155 | ... 156 | _ = test_run:cmd("switch default") 157 | --- 158 | ... 159 | test_run:drop_cluster(REPLICASET_2) 160 | --- 161 | ... 162 | test_run:drop_cluster(REPLICASET_1) 163 | --- 164 | ... 165 | -------------------------------------------------------------------------------- /test/storage/read_only_slave.test.lua: -------------------------------------------------------------------------------- 1 | test_run = require('test_run').new() 2 | REPLICASET_1 = { 'storage_1_a', 'storage_1_b' } 3 | REPLICASET_2 = { 'storage_2_a', 'storage_2_b' } 4 | test_run:create_cluster(REPLICASET_1, 'storage') 5 | test_run:create_cluster(REPLICASET_2, 'storage') 6 | util = require('util') 7 | util.wait_master(test_run, REPLICASET_1, 'storage_1_a') 8 | util.wait_master(test_run, REPLICASET_2, 'storage_2_a') 9 | util.map_evals(test_run, {REPLICASET_1, REPLICASET_2}, 'bootstrap_storage(\'memtx\')') 10 | 11 | _ = test_run:switch('storage_2_a') 12 | vshard.storage.rebalancer_disable() 13 | _ = test_run:switch('storage_1_a') 14 | box.cfg.read_only 15 | ok = nil 16 | err = nil 17 | function on_master_enable() box.space.test:replace{1, 1} end 18 | -- Test that writing is already impossible inside the disable trigger.
19 | function on_master_disable() ok, err = pcall(box.space.test.replace, box.space.test, {2, 2}) end 20 | _ = vshard.storage.on_master_enable(on_master_enable) 21 | _ = vshard.storage.on_master_disable(on_master_disable) 22 | box.space.test:select{} 23 | 24 | _ = test_run:switch('storage_1_b') 25 | box.cfg.read_only 26 | ok, err = pcall(box.schema.create_space, 'test3') 27 | assert(not ok and err.code == box.error.READONLY) 28 | fiber = require('fiber') 29 | function on_master_enable() box.space.test:replace{3, 3} end 30 | function on_master_disable() if not box.cfg.read_only then box.space.test:replace{4, 4} end end 31 | _ = vshard.storage.on_master_enable(on_master_enable) 32 | _ = vshard.storage.on_master_disable(on_master_disable) 33 | -- Yes, there is no 3 or 4, because the on-disable trigger always 34 | -- runs in read-only mode. 35 | box.space.test:select{} 36 | 37 | -- Check that after a master change read_only is updated, and 38 | -- that triggers on a master role switch can change spaces. 39 | cfg.sharding[util.replicasets[1]].replicas[util.name_to_uuid.storage_1_b].master = true 40 | cfg.sharding[util.replicasets[1]].replicas[util.name_to_uuid.storage_1_a].master = false 41 | vshard.storage.cfg(cfg, util.name_to_uuid.storage_1_b) 42 | box.cfg.read_only 43 | box.space.test:select{} 44 | 45 | _ = test_run:switch('storage_1_a') 46 | cfg.sharding[util.replicasets[1]].replicas[util.name_to_uuid.storage_1_b].master = true 47 | cfg.sharding[util.replicasets[1]].replicas[util.name_to_uuid.storage_1_a].master = false 48 | vshard.storage.cfg(cfg, util.name_to_uuid.storage_1_a) 49 | box.cfg.read_only 50 | assert(not ok and err.code == box.error.READONLY) 51 | fiber = require('fiber') 52 | while box.space.test:count() ~= 2 do fiber.sleep(0.1) end 53 | box.space.test:select{} 54 | 55 | _ = test_run:switch('storage_1_b') 56 | box.space.test:drop() 57 | 58 | _ = test_run:cmd("switch default") 59 | test_run:drop_cluster(REPLICASET_2) 60 | test_run:drop_cluster(REPLICASET_1) 61 | -------------------------------------------------------------------------------- /test/storage/recovery_errinj.result: -------------------------------------------------------------------------------- 1 | test_run = require('test_run').new() 2 | --- 3 | ... 4 | REPLICASET_1 = { 'storage_1_a', 'storage_1_b' } 5 | --- 6 | ... 7 | REPLICASET_2 = { 'storage_2_a', 'storage_2_b' } 8 | --- 9 | ... 10 | test_run:create_cluster(REPLICASET_1, 'storage') 11 | --- 12 | ... 13 | test_run:create_cluster(REPLICASET_2, 'storage') 14 | --- 15 | ... 16 | util = require('util') 17 | --- 18 | ... 19 | util.wait_master(test_run, REPLICASET_1, 'storage_1_a') 20 | --- 21 | ... 22 | util.wait_master(test_run, REPLICASET_2, 'storage_2_a') 23 | --- 24 | ... 25 | util.push_rs_filters(test_run) 26 | --- 27 | ... 28 | -- 29 | -- Test a timeout error during bucket sending, when the bucket 30 | -- becomes active on the destination. 31 | -- 32 | _ = test_run:switch('storage_2_a') 33 | --- 34 | ... 35 | vshard.storage.internal.errinj.ERRINJ_LAST_RECEIVE_DELAY = true 36 | --- 37 | ... 38 | -- Pause recovery. Otherwise it does its job too fast and does not allow to 39 | -- simulate the intermediate state. 40 | vshard.storage.internal.errinj.ERRINJ_RECOVERY_PAUSE = true 41 | --- 42 | ... 43 | _ = test_run:switch('default') 44 | --- 45 | ... 46 | util.map_bucket_protection(test_run, {REPLICASET_1}, false) 47 | --- 48 | ... 49 | _ = test_run:switch('storage_1_a') 50 | --- 51 | ... 52 | vshard.storage.internal.errinj.ERRINJ_RECOVERY_PAUSE = true 53 | --- 54 | ...
55 | _bucket = box.space._bucket 56 | --- 57 | ... 58 | _bucket:replace{1, vshard.consts.BUCKET.ACTIVE, util.replicasets[2]} 59 | --- 60 | - [1, 'active', ''] 61 | ... 62 | vshard.storage.sync() 63 | --- 64 | - true 65 | ... 66 | _ = test_run:switch('default') 67 | --- 68 | ... 69 | util.map_bucket_protection(test_run, {REPLICASET_1}, true) 70 | --- 71 | ... 72 | _ = test_run:switch('storage_1_a') 73 | --- 74 | ... 75 | ret, err = vshard.storage.bucket_send(1, util.replicasets[2], {timeout = 0.1}) 76 | --- 77 | ... 78 | ret, util.is_timeout_error(err) 79 | --- 80 | - null 81 | - true 82 | ... 83 | _bucket = box.space._bucket 84 | --- 85 | ... 86 | wait_bucket_is_collected(1) 87 | --- 88 | ... 89 | _bucket:get{1} 90 | --- 91 | ... 92 | _ = test_run:switch('storage_2_a') 93 | --- 94 | ... 95 | vshard.storage.internal.errinj.ERRINJ_LAST_RECEIVE_DELAY = false 96 | --- 97 | ... 98 | _bucket = box.space._bucket 99 | --- 100 | ... 101 | while _bucket:get{1}.status ~= vshard.consts.BUCKET.ACTIVE do fiber.sleep(0.01) end 102 | --- 103 | ... 104 | _bucket:get{1} 105 | --- 106 | - [1, 'active'] 107 | ... 108 | vshard.storage.internal.errinj.ERRINJ_RECOVERY_PAUSE = false 109 | --- 110 | ... 111 | _ = test_run:switch('storage_1_a') 112 | --- 113 | ... 114 | vshard.storage.internal.errinj.ERRINJ_RECOVERY_PAUSE = false 115 | --- 116 | ... 117 | wait_bucket_is_collected(1) 118 | --- 119 | ... 120 | _ = test_run:switch("default") 121 | --- 122 | ... 123 | test_run:drop_cluster(REPLICASET_2) 124 | --- 125 | ... 126 | test_run:drop_cluster(REPLICASET_1) 127 | --- 128 | ... 129 | _ = test_run:cmd('clear filter') 130 | --- 131 | ... 132 | -------------------------------------------------------------------------------- /test/storage/recovery_errinj.test.lua: -------------------------------------------------------------------------------- 1 | test_run = require('test_run').new() 2 | REPLICASET_1 = { 'storage_1_a', 'storage_1_b' } 3 | REPLICASET_2 = { 'storage_2_a', 'storage_2_b' } 4 | 5 | test_run:create_cluster(REPLICASET_1, 'storage') 6 | test_run:create_cluster(REPLICASET_2, 'storage') 7 | util = require('util') 8 | util.wait_master(test_run, REPLICASET_1, 'storage_1_a') 9 | util.wait_master(test_run, REPLICASET_2, 'storage_2_a') 10 | util.push_rs_filters(test_run) 11 | -- 12 | -- Test a timeout error during bucket sending, when the bucket 13 | -- becomes active on the destination. 14 | -- 15 | _ = test_run:switch('storage_2_a') 16 | vshard.storage.internal.errinj.ERRINJ_LAST_RECEIVE_DELAY = true 17 | -- Pause recovery. Otherwise it does its job too fast and does not allow to 18 | -- simulate the intermediate state.
19 | vshard.storage.internal.errinj.ERRINJ_RECOVERY_PAUSE = true 20 | 21 | _ = test_run:switch('default') 22 | util.map_bucket_protection(test_run, {REPLICASET_1}, false) 23 | 24 | _ = test_run:switch('storage_1_a') 25 | vshard.storage.internal.errinj.ERRINJ_RECOVERY_PAUSE = true 26 | _bucket = box.space._bucket 27 | _bucket:replace{1, vshard.consts.BUCKET.ACTIVE, util.replicasets[2]} 28 | vshard.storage.sync() 29 | 30 | _ = test_run:switch('default') 31 | util.map_bucket_protection(test_run, {REPLICASET_1}, true) 32 | 33 | _ = test_run:switch('storage_1_a') 34 | ret, err = vshard.storage.bucket_send(1, util.replicasets[2], {timeout = 0.1}) 35 | ret, util.is_timeout_error(err) 36 | _bucket = box.space._bucket 37 | wait_bucket_is_collected(1) 38 | _bucket:get{1} 39 | 40 | _ = test_run:switch('storage_2_a') 41 | vshard.storage.internal.errinj.ERRINJ_LAST_RECEIVE_DELAY = false 42 | _bucket = box.space._bucket 43 | while _bucket:get{1}.status ~= vshard.consts.BUCKET.ACTIVE do fiber.sleep(0.01) end 44 | _bucket:get{1} 45 | vshard.storage.internal.errinj.ERRINJ_RECOVERY_PAUSE = false 46 | 47 | _ = test_run:switch('storage_1_a') 48 | vshard.storage.internal.errinj.ERRINJ_RECOVERY_PAUSE = false 49 | wait_bucket_is_collected(1) 50 | 51 | _ = test_run:switch("default") 52 | test_run:drop_cluster(REPLICASET_2) 53 | test_run:drop_cluster(REPLICASET_1) 54 | _ = test_run:cmd('clear filter') 55 | -------------------------------------------------------------------------------- /test/storage/storage_1_1.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | 3 | -- Get instance name 4 | local name = require('fio').basename(arg[0], '.lua') 5 | -- Check if we are running under test-run 6 | if os.getenv('ADMIN') then 7 | test_run = require('test_run').new() 8 | require('console').listen(os.getenv('ADMIN')) 9 | end 10 | util = require('util') 11 | vutil = require('vshard.util') 12 | 13 | instance_uuid = nil 14 | replicaset_uuid = nil 15 | if name == 'storage_1_1' then 16 | box.cfg{} 17 | instance_uuid = box.info.uuid 18 | replicaset_uuid = vutil.replicaset_uuid() 19 | elseif name == 'storage_1_2' then 20 | box.cfg{instance_uuid = '8a274925-a26d-47fc-9e1b-af88ce939412'} 21 | instance_uuid = '8a274925-a26d-47fc-9e1b-af88ce000000' 22 | replicaset_uuid = vutil.replicaset_uuid() 23 | elseif name == 'storage_1_3' then 24 | box.cfg{replicaset_uuid = '8a274925-a26d-47fc-9e1b-af88ce939412'} 25 | instance_uuid = box.info.uuid 26 | replicaset_uuid = '8a274925-a26d-47fc-9e1b-af88ce000000' 27 | else 28 | assert(false) 29 | end 30 | 31 | cfg = { 32 | sharding = { 33 | [replicaset_uuid] = { 34 | replicas = { 35 | [instance_uuid] = { 36 | uri = 'storage:storage@127.0.0.1:3301', 37 | name = 'storage_1_1', 38 | master = true 39 | } 40 | } 41 | } 42 | }, 43 | replication_connect_timeout = 0.01, 44 | replication_connect_quorum = 0, 45 | } 46 | 47 | vshard = require('vshard') 48 | -------------------------------------------------------------------------------- /test/storage/storage_1_2.lua: -------------------------------------------------------------------------------- 1 | storage_1_1.lua -------------------------------------------------------------------------------- /test/storage/storage_1_3.lua: -------------------------------------------------------------------------------- 1 | storage_1_1.lua -------------------------------------------------------------------------------- /test/storage/storage_1_a.lua: 
-------------------------------------------------------------------------------- 1 | ../lua_libs/storage_template.lua -------------------------------------------------------------------------------- /test/storage/storage_1_b.lua: -------------------------------------------------------------------------------- 1 | ../lua_libs/storage_template.lua -------------------------------------------------------------------------------- /test/storage/storage_2_a.lua: -------------------------------------------------------------------------------- 1 | ../lua_libs/storage_template.lua -------------------------------------------------------------------------------- /test/storage/storage_2_b.lua: -------------------------------------------------------------------------------- 1 | ../lua_libs/storage_template.lua -------------------------------------------------------------------------------- /test/storage/suite.ini: -------------------------------------------------------------------------------- 1 | [default] 2 | core = tarantool 3 | description = Storage tests 4 | script = test.lua 5 | config = engine.cfg 6 | is_parallel = False 7 | release_disabled = recovery_errinj.test.lua demote_sync_errinj.test.lua 8 | lua_libs = ../lua_libs/util.lua ../lua_libs/git_util.lua 9 | ../../example/localcfg.lua 10 | -------------------------------------------------------------------------------- /test/storage/sync.result: -------------------------------------------------------------------------------- 1 | test_run = require('test_run').new() 2 | --- 3 | ... 4 | netbox = require('net.box') 5 | --- 6 | ... 7 | REPLICASET_1 = { 'storage_1_a', 'storage_1_b' } 8 | --- 9 | ... 10 | REPLICASET_2 = { 'storage_2_a', 'storage_2_b' } 11 | --- 12 | ... 13 | engine = test_run:get_cfg('engine') 14 | --- 15 | ... 16 | test_run:create_cluster(REPLICASET_1, 'storage') 17 | --- 18 | ... 19 | test_run:create_cluster(REPLICASET_2, 'storage') 20 | --- 21 | ... 22 | util = require('util') 23 | --- 24 | ... 25 | util.wait_master(test_run, REPLICASET_1, 'storage_1_a') 26 | --- 27 | ... 28 | util.wait_master(test_run, REPLICASET_2, 'storage_2_a') 29 | --- 30 | ... 31 | _ = test_run:switch('storage_1_a') 32 | --- 33 | ... 34 | s = box.schema.create_space('test') 35 | --- 36 | ... 37 | _ = s:create_index('pk') 38 | --- 39 | ... 40 | vshard.storage.sync(0.5) 41 | --- 42 | - true 43 | ... 44 | _ = test_run:cmd('stop server storage_1_b') 45 | --- 46 | ... 47 | s:replace{1} 48 | --- 49 | - [1] 50 | ... 51 | box.info.replication[2].downstream.status 52 | --- 53 | - stopped 54 | ... 55 | ok, err = vshard.storage.sync(0.5) 56 | --- 57 | ... 58 | ok, err.code == box.error.TIMEOUT or err 59 | --- 60 | - null 61 | - true 62 | ... 63 | _ = test_run:cmd('start server storage_1_b') 64 | --- 65 | ... 66 | vshard.storage.sync(1) 67 | --- 68 | - true 69 | ... 70 | _ = test_run:switch("default") 71 | --- 72 | ... 73 | test_run:drop_cluster(REPLICASET_2) 74 | --- 75 | ... 76 | test_run:drop_cluster(REPLICASET_1) 77 | --- 78 | ... 
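The same wait-for-replication call as it might appear in application code, a minimal sketch assuming vshard is already configured (the timeout value and the log message are illustrative):

local log = require('log')
-- vshard.storage.sync() blocks until all replicas of this replicaset
-- catch up with the master, or until the timeout elapses.
local ok, err = vshard.storage.sync(0.5)
if not ok then
    -- As the test above shows, err.code == box.error.TIMEOUT when a
    -- replica did not catch up in time.
    log.warn('replicas are lagging behind: %s', err)
end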
79 | -------------------------------------------------------------------------------- /test/storage/sync.test.lua: -------------------------------------------------------------------------------- 1 | test_run = require('test_run').new() 2 | netbox = require('net.box') 3 | REPLICASET_1 = { 'storage_1_a', 'storage_1_b' } 4 | REPLICASET_2 = { 'storage_2_a', 'storage_2_b' } 5 | engine = test_run:get_cfg('engine') 6 | test_run:create_cluster(REPLICASET_1, 'storage') 7 | test_run:create_cluster(REPLICASET_2, 'storage') 8 | util = require('util') 9 | util.wait_master(test_run, REPLICASET_1, 'storage_1_a') 10 | util.wait_master(test_run, REPLICASET_2, 'storage_2_a') 11 | 12 | _ = test_run:switch('storage_1_a') 13 | s = box.schema.create_space('test') 14 | _ = s:create_index('pk') 15 | 16 | vshard.storage.sync(0.5) 17 | 18 | _ = test_run:cmd('stop server storage_1_b') 19 | s:replace{1} 20 | box.info.replication[2].downstream.status 21 | ok, err = vshard.storage.sync(0.5) 22 | ok, err.code == box.error.TIMEOUT or err 23 | 24 | _ = test_run:cmd('start server storage_1_b') 25 | 26 | vshard.storage.sync(1) 27 | 28 | _ = test_run:switch("default") 29 | test_run:drop_cluster(REPLICASET_2) 30 | test_run:drop_cluster(REPLICASET_1) 31 | -------------------------------------------------------------------------------- /test/storage/test.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | 3 | require('strict').on() 4 | 5 | box.cfg{ 6 | listen = os.getenv("LISTEN"), 7 | } 8 | 9 | require('console').listen(os.getenv('ADMIN')) 10 | -------------------------------------------------------------------------------- /test/test-run.py: -------------------------------------------------------------------------------- 1 | ../test-run/test-run.py -------------------------------------------------------------------------------- /test/unit-luatest/error_test.lua: -------------------------------------------------------------------------------- 1 | local t = require('luatest') 2 | local vutil = require('vshard.util') 3 | local verror = require('vshard.error') 4 | 5 | local g = t.group('error') 6 | 7 | g.test_box_error_prev = function() 8 | t.run_only_if(vutil.feature.error_stack) 9 | 10 | local code = box.error.PROC_LUA 11 | local e1 = box.error.new(code, 'err1') 12 | local e2 = box.error.new(code, 'err2') 13 | local e3 = box.error.new(code, 'err3') 14 | e1:set_prev(e2) 15 | e2:set_prev(e3) 16 | 17 | local ve1 = verror.box(e1) 18 | local ve2 = ve1.prev 19 | ve1.prev = nil 20 | local ve3 = ve2.prev 21 | ve2.prev = nil 22 | t.assert_type(ve1, 'table') 23 | t.assert_type(ve2, 'table') 24 | t.assert_type(ve3, 'table') 25 | 26 | e1 = e1:unpack() 27 | e1.prev = nil 28 | e2 = e2:unpack() 29 | e2.prev = nil 30 | e3 = e3:unpack() 31 | 32 | t.assert_equals(e1, ve1) 33 | t.assert_equals(e2, ve2) 34 | t.assert_equals(e3, ve3) 35 | end 36 | -------------------------------------------------------------------------------- /test/unit-luatest/service_info_test.lua: -------------------------------------------------------------------------------- 1 | local t = require('luatest') 2 | local fiber = require('fiber') 3 | local vinfo = require('vshard.service_info') 4 | local vtest = require('test.luatest_helpers.vtest') 5 | 6 | local g = t.group('service_info') 7 | 8 | local function make_service_default(service) 9 | service.status_idx = 0 10 | service:set_activity('activity_1') 11 | service:set_status_error('error_1') 12 | end 13 | 14 | local function 
test_info_equals_default(service_info) 15 | t.assert_equals(service_info.status, 'error') 16 | t.assert_equals(service_info.status_idx, 1) 17 | t.assert_equals(service_info.activity, 'activity_1') 18 | t.assert_equals(service_info.error, 'error_1') 19 | end 20 | 21 | g.before_all(function(g) 22 | g.default_service = vinfo.new() 23 | end) 24 | 25 | g.before_each(function(g) 26 | -- Restore service to default values 27 | make_service_default(g.default_service) 28 | end) 29 | 30 | g.test_basic = function(g) 31 | local info = g.default_service:info() 32 | -- Test that a copy of data is returned and initial one cannot 33 | -- be changed via the returned value of service_info:info(). 34 | info.status = 'some status' 35 | info = g.default_service:info() 36 | test_info_equals_default(info) 37 | 38 | -- Cannot set a new error as the first one was already set and 39 | -- the iteration wasn't reset (next_iter()). Only the first error 40 | -- occurring on every service iteration must be saved. 41 | g.default_service:set_status_error('error_2') 42 | info = g.default_service:info() 43 | t.assert_equals(info.error, 'error_1') 44 | t.assert_equals(info.status_idx, 1) 45 | 46 | -- Let's assume that on the second iteration (after resetting with 47 | -- next_iter()) 'error_1' didn't occur, but 'error_2' did. 48 | g.default_service:next_iter() 49 | g.default_service:set_status_error('error_2') 50 | info = g.default_service:info() 51 | t.assert_equals(info.error, 'error_2') 52 | t.assert_equals(info.status_idx, 2) 53 | 54 | -- Drop the error, everything is all right 55 | g.default_service:set_status_ok() 56 | info = g.default_service:info() 57 | t.assert_equals(info.status, 'ok') 58 | t.assert_equals(info.status_idx, 3) 59 | t.assert_equals(info.error, '') 60 | end 61 | 62 | g.test_helpers = function(g) 63 | -- Scan for an already occurred error 64 | vtest.service_wait_for_error(g.default_service, 'error_1') 65 | 66 | -- Wait for the new error (the first occurred one is the requested one) 67 | g.default_service:set_status_ok() 68 | fiber.new(function(service) 69 | service:set_status_error('error_3') 70 | end, g.default_service) 71 | vtest.service_wait_for_new_error(g.default_service, 'error_3') 72 | 73 | -- Wait until everything is good 74 | fiber.new(function(service) 75 | service:set_status_ok() 76 | end, g.default_service) 77 | vtest.service_wait_for_new_ok(g.default_service) 78 | end 79 | -------------------------------------------------------------------------------- /test/unit-luatest/suite.ini: -------------------------------------------------------------------------------- 1 | [default] 2 | core = luatest 3 | description = Unit tests 4 | is_parallel = True 5 | release_disabled = 6 | -------------------------------------------------------------------------------- /test/unit-tap/suite.ini: -------------------------------------------------------------------------------- 1 | [default] 2 | core = app 3 | description = Unit tests TAP 4 | is_parallel = True 5 | -------------------------------------------------------------------------------- /test/unit/box.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | 3 | require('strict').on() 4 | 5 | box.cfg{ 6 | listen = os.getenv("LISTEN"), 7 | } 8 | 9 | require('console').listen(os.getenv('ADMIN')) 10 | -------------------------------------------------------------------------------- /test/unit/box2.lua: -------------------------------------------------------------------------------- 1 | box.lua
-------------------------------------------------------------------------------- /test/unit/engine.cfg: -------------------------------------------------------------------------------- 1 | { 2 | "garbage.test.lua": { 3 | "memtx": {"engine": "memtx"}, 4 | "vinyl": {"engine": "vinyl"} 5 | }, 6 | "garbage_errinj.test.lua": { 7 | "memtx": {"engine": "memtx"}, 8 | "vinyl": {"engine": "vinyl"} 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /test/unit/error.test.lua: -------------------------------------------------------------------------------- 1 | test_run = require('test_run').new() 2 | vshard = require('vshard') 3 | util = require('util') 4 | json = require('json') 5 | lerror = vshard.error 6 | 7 | -- 8 | -- Test string representations of errors. 9 | -- 10 | ok, err = pcall(box.error, box.error.TIMEOUT) 11 | box_error = lerror.box(err) 12 | str = tostring(box_error) 13 | util.portable_error(json.decode(str)) 14 | 15 | vshard_error = lerror.vshard(lerror.code.UNREACHABLE_MASTER, 'uuid', 'reason') 16 | tostring(vshard_error) 17 | 18 | log = require('log') 19 | log.info('Log error: %s', vshard_error) 20 | test_run:grep_log('default', '"reason":"reason","code":11,"type":"ShardingError"') 21 | 22 | e = lerror.vshard(lerror.code.STORAGE_IS_DISABLED, 'any reason') 23 | e = lerror.from_string(tostring(e)) 24 | assert(e.code == lerror.code.STORAGE_IS_DISABLED) 25 | assert(e.type == 'ShardingError') 26 | assert(e.message == 'Storage is disabled: any reason') 27 | 28 | assert(not lerror.from_string('bad json')) 29 | assert(not lerror.from_string('100')) 30 | assert(not lerror.from_string('{"type": 100}')) 31 | assert(not lerror.from_string('{"type": "type", "code": "str"}')) 32 | assert(not lerror.from_string('{"type": "type", "code": 100, "message": 100}')) 33 | assert(lerror.from_string('{"type": "type", "code": 100, '.. \ 34 | '"message": "msg"}') ~= nil) 35 | 36 | -- 37 | -- Part of gh-100: check `error.vshard`. 38 | -- 39 | lerror.vshard(lerror.code.WRONG_BUCKET, 1, 'arg2', 'arg3') 40 | -- Pass an arg of a wrong type. 41 | util.check_error(lerror.vshard, lerror.code.WRONG_BUCKET, 'arg1', 'arg2', 100) 42 | -- Pass less args than msg requires. 43 | util.check_error(lerror.vshard, lerror.code.MISSING_MASTER) 44 | -- Pass more args than `args` field contains. 45 | util.check_error(lerror.vshard, lerror.code.MISSING_MASTER, 'arg1', 'arg2') 46 | -- Pass wrong format code. 47 | util.check_error(lerror.vshard, 'Wrong format code', 'arg1', 'arg2') 48 | 49 | function raise_lua_err() assert(false) end 50 | ok, err = pcall(raise_lua_err) 51 | err = lerror.make(err) 52 | util.portable_error(err) 53 | 54 | -- 55 | -- lerror.timeout() - portable alternative to box.error.new(box.error.TIMEOUT). 56 | -- 57 | err = lerror.timeout() 58 | type(err) 59 | assert(err.code == box.error.TIMEOUT) 60 | err.type 61 | err.message 62 | -------------------------------------------------------------------------------- /test/unit/reload_evolution.result: -------------------------------------------------------------------------------- 1 | test_run = require('test_run').new() 2 | --- 3 | ... 4 | fiber = require('fiber') 5 | --- 6 | ... 7 | log = require('log') 8 | --- 9 | ... 10 | util = require('util') 11 | --- 12 | ... 13 | reload_evolution = require('vshard.storage.reload_evolution') 14 | --- 15 | ... 16 | -- Init with the latest version. 17 | fake_M = { reload_version = reload_evolution.version } 18 | --- 19 | ... 20 | -- Test reload to the same version. 
21 | reload_evolution.upgrade(fake_M) 22 | --- 23 | ... 24 | test_run:grep_log('default', 'vshard.storage.evolution') == nil 25 | --- 26 | - true 27 | ... 28 | -- Test downgrade version. 29 | log.info(string.rep('a', 1000)) 30 | --- 31 | ... 32 | fake_M.reload_version = fake_M.reload_version + 1 33 | --- 34 | ... 35 | err = util.check_error(reload_evolution.upgrade, fake_M) 36 | --- 37 | ... 38 | err:match('auto%-downgrade is not implemented') 39 | --- 40 | - auto-downgrade is not implemented 41 | ... 42 | test_run:grep_log('default', 'vshard.storage.evolution', 1000) ~= nil 43 | --- 44 | - false 45 | ... 46 | -------------------------------------------------------------------------------- /test/unit/reload_evolution.test.lua: -------------------------------------------------------------------------------- 1 | test_run = require('test_run').new() 2 | fiber = require('fiber') 3 | log = require('log') 4 | util = require('util') 5 | reload_evolution = require('vshard.storage.reload_evolution') 6 | -- Init with the latest version. 7 | fake_M = { reload_version = reload_evolution.version } 8 | 9 | -- Test reload to the same version. 10 | reload_evolution.upgrade(fake_M) 11 | test_run:grep_log('default', 'vshard.storage.evolution') == nil 12 | 13 | -- Test downgrade version. 14 | log.info(string.rep('a', 1000)) 15 | fake_M.reload_version = fake_M.reload_version + 1 16 | err = util.check_error(reload_evolution.upgrade, fake_M) 17 | err:match('auto%-downgrade is not implemented') 18 | test_run:grep_log('default', 'vshard.storage.evolution', 1000) ~= nil 19 | -------------------------------------------------------------------------------- /test/unit/rlist.result: -------------------------------------------------------------------------------- 1 | -- test-run result file version 2 2 | -- 3 | -- gh-161: parallel rebalancer. One of the most important parts of the latter is 4 | -- a dispenser. It is a structure which hands out destination UUIDs in a 5 | -- round-robin manner to worker fibers. It uses the rlist data structure. 6 | -- 7 | rlist = require('vshard.rlist') 8 | | --- 9 | | ... 10 | 11 | list = rlist.new() 12 | | --- 13 | | ... 14 | list 15 | | --- 16 | | - count: 0 17 | | ... 18 | 19 | obj1 = {i = 1} 20 | | --- 21 | | ... 22 | list:remove(obj1) 23 | | --- 24 | | ... 25 | list 26 | | --- 27 | | - count: 0 28 | | ... 29 | 30 | list:add_tail(obj1) 31 | | --- 32 | | ... 33 | list 34 | | --- 35 | | - count: 1 36 | | last: &0 37 | | i: 1 38 | | first: *0 39 | | ... 40 | 41 | list:remove(obj1) 42 | | --- 43 | | ... 44 | list 45 | | --- 46 | | - count: 0 47 | | ... 48 | obj1 49 | | --- 50 | | - i: 1 51 | | ... 52 | 53 | list:add_tail(obj1) 54 | | --- 55 | | ... 56 | obj2 = {i = 2} 57 | | --- 58 | | ... 59 | list:add_tail(obj2) 60 | | --- 61 | | ... 62 | list 63 | | --- 64 | | - count: 2 65 | | last: &0 66 | | i: 2 67 | | prev: &1 68 | | i: 1 69 | | next: *0 70 | | first: *1 71 | | ... 72 | obj3 = {i = 3} 73 | | --- 74 | | ... 75 | list:add_tail(obj3) 76 | | --- 77 | | ... 78 | list 79 | | --- 80 | | - count: 3 81 | | last: &0 82 | | i: 3 83 | | prev: &1 84 | | i: 2 85 | | next: *0 86 | | prev: &2 87 | | i: 1 88 | | next: *1 89 | | first: *2 90 | | ... 91 | 92 | list:remove(obj2) 93 | | --- 94 | | ... 95 | list 96 | | --- 97 | | - count: 2 98 | | last: &0 99 | | i: 3 100 | | prev: &1 101 | | i: 1 102 | | next: *0 103 | | first: *1 104 | | ... 105 | list:remove(obj1) 106 | | --- 107 | | ... 108 | list 109 | | --- 110 | | - count: 1 111 | | last: &0 112 | | i: 3 113 | | first: *0 114 | | ...
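The same doubly linked list operations in plain code form; a minimal sketch, where the route object is an arbitrary table and its dst_uuid field is purely illustrative - the list itself only touches prev, next, first, last and count:

local rlist = require('vshard.rlist')
local list = rlist.new()
local route = {dst_uuid = 'some uuid'}
list:add_tail(route)
assert(list.count == 1 and list.first == route and list.last == route)
list:remove(route)
-- remove() of a non-member is a no-op, so the count stays consistent.
assert(list.count == 0 and list.first == nil)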
115 | -------------------------------------------------------------------------------- /test/unit/rlist.test.lua: -------------------------------------------------------------------------------- 1 | -- 2 | -- gh-161: parallel rebalancer. One of the most important parts of the latter is 3 | -- a dispenser. It is a structure which hands out destination UUIDs in a 4 | -- round-robin manner to worker fibers. It uses the rlist data structure. 5 | -- 6 | rlist = require('vshard.rlist') 7 | 8 | list = rlist.new() 9 | list 10 | 11 | obj1 = {i = 1} 12 | list:remove(obj1) 13 | list 14 | 15 | list:add_tail(obj1) 16 | list 17 | 18 | list:remove(obj1) 19 | list 20 | obj1 21 | 22 | list:add_tail(obj1) 23 | obj2 = {i = 2} 24 | list:add_tail(obj2) 25 | list 26 | obj3 = {i = 3} 27 | list:add_tail(obj3) 28 | list 29 | 30 | list:remove(obj2) 31 | list 32 | list:remove(obj1) 33 | list 34 | -------------------------------------------------------------------------------- /test/unit/router.test.lua: -------------------------------------------------------------------------------- 1 | ffi = require('ffi') 2 | util = require('util') 3 | vshard = require('vshard') 4 | vshard.router.cfg({sharding = {}}) 5 | 6 | -- 7 | -- gh-207: vshard.router.bucket_id() was not consistent when 8 | -- values were cdata numbers. 9 | -- 10 | 11 | function check_values(bid, values) \ 12 | local result = {} \ 13 | for _, v in pairs(values) do \ 14 | local v1 = bid(v) \ 15 | local v2 = bid(v) \ 16 | local t = type(v) \ 17 | if t == 'cdata' then \ 18 | t = ffi.typeof(v) \ 19 | end \ 20 | if v1 ~= v2 then \ 21 | table.insert(result, {'not stable', {t, v, v1, v2}}) \ 22 | else \ 23 | table.insert(result, {t, v, v1}) \ 24 | end \ 25 | end \ 26 | return result \ 27 | end 28 | 29 | util.check_error(vshard.router.bucket_id) 30 | 31 | values = {1, 1LL, 1ULL, ffi.cast('uint64_t', 1), ffi.cast('int64_t', 1), '1'} 32 | 33 | check_values(vshard.router.bucket_id, values) 34 | check_values(vshard.router.bucket_id_strcrc32, values) 35 | 36 | -- Floating point cdata is not tested in strcrc32, because 37 | -- tostring() is not stable on them, and returns a pointer string. 38 | table.insert(values, ffi.cast('double', 1)) 39 | table.insert(values, ffi.cast('float', 1)) 40 | check_values(vshard.router.bucket_id_mpcrc32, values) 41 | 42 | -- Decimal is not available in 1.10, but vshard and its tests 43 | -- should work on 1.10. Decimal test is optional.
44 | has_decimal, decimal = pcall(require, 'decimal') 45 | 46 | not has_decimal or vshard.router.bucket_id_mpcrc32(decimal.new(1)) == 1696 or \ 47 | vshard.router.bucket_id_mpcrc32(decimal.new(1)) 48 | 49 | vshard.router.bucket_count() 50 | -------------------------------------------------------------------------------- /test/unit/suite.ini: -------------------------------------------------------------------------------- 1 | [default] 2 | core = tarantool 3 | description = Unit tests 4 | script = box.lua 5 | config = engine.cfg 6 | lua_libs = ../lua_libs/util.lua ../lua_libs/git_util.lua 7 | is_parallel = True 8 | release_disabled = garbage_errinj.test.lua 9 | -------------------------------------------------------------------------------- /test/upgrade/box.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | 3 | require('strict').on() 4 | 5 | box.cfg{ 6 | listen = os.getenv("LISTEN"), 7 | } 8 | 9 | require('console').listen(os.getenv('ADMIN')) 10 | -------------------------------------------------------------------------------- /test/upgrade/storage_1_a.lua: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env tarantool 2 | util = require('util') 3 | NAME = require('fio').basename(arg[0], '.lua') 4 | local source_path = arg[1] 5 | if source_path then 6 | -- Run one storage on a different vshard 7 | -- version. 8 | package.path = string.format('%s/?.lua;%s/?/init.lua;%s', source_path, 9 | source_path, package.path) 10 | end 11 | require('storage_template') 12 | -------------------------------------------------------------------------------- /test/upgrade/storage_1_b.lua: -------------------------------------------------------------------------------- 1 | storage_1_a.lua -------------------------------------------------------------------------------- /test/upgrade/storage_2_a.lua: -------------------------------------------------------------------------------- 1 | storage_1_a.lua -------------------------------------------------------------------------------- /test/upgrade/storage_2_b.lua: -------------------------------------------------------------------------------- 1 | storage_1_a.lua -------------------------------------------------------------------------------- /test/upgrade/suite.ini: -------------------------------------------------------------------------------- 1 | [default] 2 | core = tarantool 3 | description = Upgrade tests 4 | script = box.lua 5 | is_parallel = False 6 | lua_libs = ../lua_libs/storage_template.lua ../lua_libs/util.lua 7 | ../lua_libs/git_util.lua ../../example/localcfg.lua 8 | -------------------------------------------------------------------------------- /vshard-scm-1.rockspec: -------------------------------------------------------------------------------- 1 | package = 'vshard' 2 | version = 'scm-1' 3 | source = { 4 | url = 'git+https://github.com/tarantool/vshard.git', 5 | branch = 'master', 6 | } 7 | description = { 8 | summary = 'The new generation of sharding based on virtual buckets', 9 | homepage = 'https://github.com/tarantool/vshard.git', 10 | license = 'BSD', 11 | } 12 | dependencies = { 13 | 'lua ~> 5.1'; 14 | } 15 | 16 | external_dependencies = { 17 | TARANTOOL = { 18 | header = 'tarantool/module.h', 19 | }, 20 | } 21 | 22 | build = { 23 | type = 'cmake'; 24 | variables = { 25 | CMAKE_BUILD_TYPE="RelWithDebInfo"; 26 | TARANTOOL_DIR="$(TARANTOOL_DIR)"; 27 | TARANTOOL_INSTALL_LIBDIR="$(LIBDIR)"; 28 | 
TARANTOOL_INSTALL_LUADIR="$(LUADIR)"; 29 | }; 30 | } 31 | 32 | -- vim: syntax=lua 33 | -------------------------------------------------------------------------------- /vshard/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | if(APPLE) 2 | set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -undefined suppress -flat_namespace") 3 | endif(APPLE) 4 | 5 | add_subdirectory(storage) 6 | add_subdirectory(router) 7 | 8 | # Install module 9 | install(FILES cfg.lua error.lua consts.lua hash.lua init.lua replicaset.lua 10 | util.lua rlist.lua heap.lua registry.lua version.lua service_info.lua 11 | DESTINATION ${TARANTOOL_INSTALL_LUADIR}/vshard) 12 | -------------------------------------------------------------------------------- /vshard/consts.lua: -------------------------------------------------------------------------------- 1 | return { 2 | -- Contains the module version. 3 | -- Requires manual update in case of a release commit. 4 | VERSION = '0.1.33', 5 | 6 | -- Bucket FSM 7 | BUCKET = { 8 | ACTIVE = 'active', 9 | PINNED = 'pinned', 10 | SENDING = 'sending', 11 | SENT = 'sent', 12 | RECEIVING = 'receiving', 13 | GARBAGE = 'garbage', 14 | }, 15 | 16 | BUCKET_EVENT = { 17 | -- Txn triggers allow attaching anything to the bucket transaction. That 18 | -- was requested by a customer to be able to detect vshard book-keeping 19 | -- actions like bucket sending right in the journal. 20 | RECV = 'bucket_data_recv_txn', 21 | GC = 'bucket_data_gc_txn' 22 | }, 23 | 24 | STATUS = { 25 | GREEN = 0, 26 | YELLOW = 1, 27 | ORANGE = 2, 28 | RED = 3, 29 | }, 30 | 31 | REPLICATION_THRESHOLD_SOFT = 1, 32 | REPLICATION_THRESHOLD_HARD = 5, 33 | REPLICATION_THRESHOLD_FAIL = 10, 34 | 35 | DEFAULT_BUCKET_COUNT = 3000; 36 | BUCKET_SENT_GARBAGE_DELAY = 60; 37 | BUCKET_CHUNK_SIZE = 1000; 38 | LUA_CHUNK_SIZE = 100000, 39 | DEFAULT_REBALANCER_DISBALANCE_THRESHOLD = 1; 40 | REBALANCER_IDLE_INTERVAL = 60 * 60; 41 | REBALANCER_WORK_INTERVAL = 10; 42 | REBALANCER_CHUNK_TIMEOUT = 60 * 5; 43 | REBALANCER_GET_STATE_TIMEOUT = 5, 44 | REBALANCER_APPLY_ROUTES_TIMEOUT = 5, 45 | DEFAULT_REBALANCER_MAX_SENDING = 1; 46 | REBALANCER_MAX_SENDING_MAX = 15; 47 | DEFAULT_REBALANCER_MAX_RECEIVING = 100; 48 | CALL_TIMEOUT_MIN = 0.5; 49 | CALL_TIMEOUT_MAX = 64; 50 | FAILOVER_UP_TIMEOUT = 5; 51 | FAILOVER_DOWN_TIMEOUT = 1; 52 | FAILOVER_DOWN_SEQUENTIAL_FAIL = 3; 53 | DEFAULT_FAILOVER_PING_TIMEOUT = 5; 54 | DEFAULT_SYNC_TIMEOUT = 1; 55 | RECONNECT_TIMEOUT = 0.5; 56 | GC_BACKOFF_INTERVAL = 5, 57 | GC_MAP_CALL_TIMEOUT = 64, 58 | GC_WAIT_LSN_TIMEOUT = 64, 59 | GC_WAIT_LSN_STEP = 0.1, 60 | RECOVERY_BACKOFF_INTERVAL = 5, 61 | RECOVERY_GET_STAT_TIMEOUT = 5, 62 | REPLICA_BACKOFF_INTERVAL = 5, 63 | REPLICA_NOACTIVITY_TIMEOUT = 30, 64 | DEFAULT_BUCKET_SEND_TIMEOUT = 10, 65 | DEFAULT_BUCKET_RECV_TIMEOUT = 10, 66 | REPLICA_MAX_LAG = 30, 67 | REPLICA_MAX_IDLE = 30, 68 | 69 | DEFAULT_SCHED_REF_QUOTA = 300, 70 | DEFAULT_SCHED_MOVE_QUOTA = 1, 71 | 72 | DISCOVERY_IDLE_INTERVAL = 10, 73 | DISCOVERY_WORK_INTERVAL = 1, 74 | DISCOVERY_WORK_STEP = 0.01, 75 | DISCOVERY_TIMEOUT = 10, 76 | 77 | MASTER_SEARCH_IDLE_INTERVAL = 5, 78 | MASTER_SEARCH_WORK_INTERVAL = 0.5, 79 | MASTER_SEARCH_BACKOFF_INTERVAL = 5, 80 | MASTER_SEARCH_TIMEOUT = 5, 81 | 82 | TIMEOUT_INFINITY = 500 * 365 * 86400, 83 | DEADLINE_INFINITY = math.huge, 84 | } 85 | -------------------------------------------------------------------------------- /vshard/hash.lua: -------------------------------------------------------------------------------- 1 |
-- hash.lua 2 | local ldigest = require('digest') 3 | local mpencode = require('msgpackffi').encode 4 | 5 | -- 6 | -- Fast and simple hash. However, it works incorrectly with 7 | -- floating point cdata values. Also, the hash of an integer value 8 | -- depends on its type: Lua number, cdata int64, cdata uint64. 9 | -- 10 | local function strcrc32(shard_key) 11 | if type(shard_key) ~= 'table' then 12 | return ldigest.crc32(tostring(shard_key)) 13 | else 14 | local crc32 = ldigest.crc32.new() 15 | for _, v in ipairs(shard_key) do 16 | crc32:update(tostring(v)) 17 | end 18 | return crc32:result() 19 | end 20 | end 21 | 22 | local function mpcrc32_one(value) 23 | if type(value) ~= 'string' then 24 | return mpencode(value) 25 | else 26 | -- Despite the function being called 'mp', strings are not 27 | -- encoded. This is because it does not make much sense to 28 | -- copy the whole string onto a temporary buffer just to 29 | -- add a small MessagePack header. Such a 'hack' makes 30 | -- hashing of strings several orders of magnitude faster. 31 | return value 32 | end 33 | end 34 | 35 | -- 36 | -- Stable hash providing the correct values for integers not 37 | -- depending on their size. However, it may return different hashes 38 | -- for the same floating point value if it is cdata float or cdata 39 | -- double. 40 | -- 41 | local function mpcrc32(shard_key) 42 | if type(shard_key) ~= 'table' then 43 | return ldigest.crc32(mpcrc32_one(shard_key)) 44 | else 45 | local crc32 = ldigest.crc32.new() 46 | for _, v in ipairs(shard_key) do 47 | crc32:update(mpcrc32_one(v)) 48 | end 49 | return crc32:result() 50 | end 51 | end 52 | 53 | return { 54 | strcrc32 = strcrc32, 55 | mpcrc32 = mpcrc32, 56 | } 57 | -------------------------------------------------------------------------------- /vshard/init.lua: -------------------------------------------------------------------------------- 1 | local consts = require('vshard.consts') 2 | 3 | return { 4 | _VERSION = consts.VERSION, 5 | router = require('vshard.router'), 6 | storage = require('vshard.storage'), 7 | consts = consts, 8 | error = require('vshard.error'), 9 | } 10 | -------------------------------------------------------------------------------- /vshard/registry.lua: -------------------------------------------------------------------------------- 1 | -- 2 | -- Registry is a way to resolve cyclic dependencies which normally can exist 3 | -- between files of the same module/library. 4 | -- 5 | -- Files which want to expose their API to the other files, which in turn can't 6 | -- require the former directly, should put their API into the registry. 7 | -- 8 | -- The files should use the registry to get the API of the other files. They don't 9 | -- require() and use the latter directly if there is a known loop dependency 10 | -- between them. 11 | -- 12 | -- At runtime, when all require() are done, the registry is full, and all the 13 | -- files see the API of each other. 14 | -- 15 | -- Having the modules accessed via the registry adds at least +1 indexing 16 | -- operation at runtime when a function needs to be fetched from there. But sometimes it 17 | -- can be cached to reduce the effect in perf-sensitive code. For example, like 18 | -- this: 19 | -- 20 | -- local lreg = require('vshard.registry') 21 | -- 22 | -- local storage_func 23 | -- 24 | -- local function storage_func_no_cache(...) 25 | -- storage_func = lreg.storage.func 26 | -- return storage_func(...)
27 | -- end 28 | -- 29 | -- storage_func = storage_func_no_cache 30 | -- 31 | -- The code will always call storage_func(), but will load it from the registry 32 | -- only on the first invocation. 33 | -- 34 | -- However, when reload is important, that is not possible - the original 35 | -- function object in the registry may change. In such a situation it still makes 36 | -- sense to cache at least 'lreg.storage' to save 1 indexing operation. 37 | -- 38 | -- local lreg = require('vshard.registry') 39 | -- 40 | -- local lstorage 41 | -- local storage_func 42 | -- 43 | -- local function storage_func_cache(...) 44 | -- return lstorage.storage_func(...) 45 | -- end 46 | -- 47 | -- local function storage_func_no_cache(...) 48 | -- lstorage = lreg.storage 49 | -- storage_func = storage_func_cache 50 | -- return lstorage.storage_func(...) 51 | -- end 52 | -- 53 | -- storage_func = storage_func_no_cache 54 | -- 55 | -- A harder way would be to use the first approach + add triggers on reload of 56 | -- the cached module to update the cached function refs. That is only worth it if 57 | -- the code is extremely perf-critical (in which case it probably should not be in Lua). 58 | -- 59 | 60 | local MODULE_INTERNALS = '__module_vshard_registry' 61 | 62 | local M = rawget(_G, MODULE_INTERNALS) 63 | if not M then 64 | M = {} 65 | rawset(_G, MODULE_INTERNALS, M) 66 | end 67 | 68 | return M 69 | -------------------------------------------------------------------------------- /vshard/rlist.lua: -------------------------------------------------------------------------------- 1 | -- 2 | -- A subset of rlist methods from the main repository. Rlist is a 3 | -- doubly linked list, and is used here to implement a queue of 4 | -- routes in the parallel rebalancer. 5 | -- 6 | local rlist_index = {} 7 | 8 | function rlist_index.add_tail(rlist, object) 9 | local last = rlist.last 10 | if last then 11 | last.next = object 12 | object.prev = last 13 | else 14 | rlist.first = object 15 | end 16 | rlist.last = object 17 | rlist.count = rlist.count + 1 18 | end 19 | 20 | function rlist_index.remove(rlist, object) 21 | local prev = object.prev 22 | local next = object.next 23 | local belongs_to_list = false 24 | if prev then 25 | belongs_to_list = true 26 | prev.next = next 27 | end 28 | if next then 29 | belongs_to_list = true 30 | next.prev = prev 31 | end 32 | object.prev = nil 33 | object.next = nil 34 | if rlist.last == object then 35 | belongs_to_list = true 36 | rlist.last = prev 37 | end 38 | if rlist.first == object then 39 | belongs_to_list = true 40 | rlist.first = next 41 | end 42 | if belongs_to_list then 43 | rlist.count = rlist.count - 1 44 | end 45 | end 46 | 47 | local rlist_mt = { 48 | __index = rlist_index, 49 | } 50 | 51 | local function rlist_new() 52 | return setmetatable({count = 0}, rlist_mt) 53 | end 54 | 55 | return { 56 | new = rlist_new, 57 | } 58 | -------------------------------------------------------------------------------- /vshard/router/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | install(FILES init.lua 2 | DESTINATION ${TARANTOOL_INSTALL_LUADIR}/vshard/router) 3 | -------------------------------------------------------------------------------- /vshard/service_info.lua: -------------------------------------------------------------------------------- 1 | -- 2 | -- Every vshard background service (e.g. the rebalancer or discovery) 3 | -- should have an associated instance of the service_info, which 4 | -- stores the status and the current activity of the service.
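-- A short usage sketch based on the API defined below; the service
-- name and the messages are purely illustrative:
--
--     local vinfo = require('vshard.service_info')
--     local srv = vinfo.new('my_service')
--     srv:set_activity('connecting')
--     srv:set_status_error('connect failed: %s', 'timeout')
--     srv:next_iter()     -- new iteration, a new error may be saved
--     srv:set_status_ok()
--     local snapshot = srv:info()
--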
5 | -- 6 | -- It may be accessed explicitly through the internals of the storage 7 | -- or the router, which is done for testing of the background fibers 8 | -- with luatest. For monitoring purposes the info can be accessed via 9 | -- vshard.router/storage.info({with_services = true}). 10 | -- 11 | 12 | local SERVICE_TEMPLATE = { 13 | -- For logging purposes 14 | name = nil, 15 | -- Intended to show whether everything is all right. 16 | status = 'ok', 17 | -- Increasing number of status changes. 18 | status_idx = 0, 19 | -- Intended to show what the service is doing right now. 20 | activity = 'unknown', 21 | -- Shows whether an error was already set on the current iteration. 22 | -- Should be reset to false on every iteration with info:next_iter(). 23 | is_error_set = false, 24 | -- Active error 25 | error = '', 26 | } 27 | 28 | local function service_next_iter(service) 29 | service.is_error_set = false 30 | end 31 | 32 | local function service_set_status(service, status) 33 | service.status_idx = service.status_idx + 1 34 | service.status = status 35 | end 36 | 37 | -- Just for consistency with set_status 38 | local function service_set_activity(service, activity) 39 | service.activity = activity 40 | end 41 | 42 | local function service_set_status_ok(service) 43 | service_set_status(service, 'ok') 44 | service_next_iter(service) 45 | service.error = '' 46 | end 47 | 48 | local function service_set_status_error(service, err, ...) 49 | local err_str = string.format(err, ...) 50 | if not service.is_error_set then 51 | -- An error is supposed to be saved only if one has not already been 52 | -- set. A new error can be set only after info:set_status_ok() or 53 | -- after resetting the iteration with next_iter(). 54 | service_set_status(service, 'error') 55 | service.is_error_set = true 56 | service.error = err_str 57 | end 58 | return err_str 59 | end 60 | 61 | -- Get a copy of all data contained in the service 62 | local function service_info(service) 63 | local data = table.deepcopy(service) 64 | -- Implementation detail 65 | data.is_error_set = nil 66 | return data 67 | end 68 | 69 | local service_mt = { 70 | __index = { 71 | -- Low-level setters 72 | set_status = service_set_status, 73 | set_activity = service_set_activity, 74 | next_iter = service_next_iter, 75 | -- High-level wrappers 76 | set_status_ok = service_set_status_ok, 77 | set_status_error = service_set_status_error, 78 | -- Misc 79 | info = service_info, 80 | } 81 | } 82 | 83 | local function service_new(name) 84 | local service = table.deepcopy(SERVICE_TEMPLATE) 85 | setmetatable(service, service_mt) 86 | service.name = name or 'default' 87 | return service 88 | end 89 | 90 | return { 91 | new = service_new, 92 | } 93 | -------------------------------------------------------------------------------- /vshard/storage/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | install(FILES init.lua reload_evolution.lua ref.lua sched.lua schema.lua 2 | export_log.lua exports.lua 3 | DESTINATION ${TARANTOOL_INSTALL_LUADIR}/vshard/storage) 4 | -------------------------------------------------------------------------------- /vshard/storage/export_log.lua: -------------------------------------------------------------------------------- 1 | -- 2 | -- The export log module provides all exports for all vshard schema versions. It 3 | -- is used by vshard itself to deploy the exports and can be used by external 4 | -- tools to do the same. 5 | -- 6 | -- Note that it contains only the exports.
, not the complete schema: there 7 | -- is no info about users, spaces, and whatever else is not for direct usage 8 | -- by users and routers. 9 | -- 10 | -- The export log has the format: 11 | -- 12 | -- [ 13 | --     { 14 | --         version = <vshard version string>, 15 | --         funcs = { 16 | --             <func name> = [ 17 | --                 { 18 | --                     since_core = <minimal Tarantool core version>, 19 | --                     def = <function definition options>, 20 | --                 }, 21 | --                 <next definition of the same function for a 22 | --                  newer core version>, 23 | --                 ... 24 | --             ], 25 | --             <next function>, 26 | --             ... 27 | --         }, 28 | --     }, 29 | --     <next vshard version>, 30 | --     ... 31 | -- ] 32 | -- 33 | 34 | -------------------------------------------------------------------------------- 35 | -- Version 0.1.15.0 36 | -------------------------------------------------------------------------------- 37 | local version_0_1_15_0_func_names = { 38 | 'vshard.storage.sync', 39 | 'vshard.storage.call', 40 | 'vshard.storage.bucket_force_create', 41 | 'vshard.storage.bucket_force_drop', 42 | 'vshard.storage.bucket_collect', 43 | 'vshard.storage.bucket_send', 44 | 'vshard.storage.bucket_recv', 45 | 'vshard.storage.bucket_stat', 46 | 'vshard.storage.buckets_count', 47 | 'vshard.storage.buckets_info', 48 | 'vshard.storage.buckets_discovery', 49 | 'vshard.storage.rebalancer_request_state', 50 | 'vshard.storage.rebalancer_apply_routes', 51 | } 52 | local version_0_1_15_0_funcs = {} 53 | for _, name in pairs(version_0_1_15_0_func_names) do 54 | version_0_1_15_0_funcs[name] = { 55 | {since_core = '1.10.0', def = {setuid = true}} 56 | } 57 | end 58 | -- 59 | -- Make vshard.storage.bucket_recv() understand raw args as a msgpack object. It 60 | -- doesn't necessarily make it faster, but is essential to preserve the original 61 | -- tuples as is, without their contortion through Lua tables, which would break 62 | -- field types like MP_BIN (varbinary). 63 | -- 64 | table.insert(version_0_1_15_0_funcs['vshard.storage.bucket_recv'], { 65 | since_core = '2.10.0-beta2', 66 | def = {setuid = true, takes_raw_args = true} 67 | }) 68 | local version_0_1_15_0 = { 69 | version = '0.1.15.0', 70 | funcs = version_0_1_15_0_funcs 71 | } 72 | 73 | -------------------------------------------------------------------------------- 74 | -- Version 0.1.16.0 75 | -------------------------------------------------------------------------------- 76 | local version_0_1_16_0 = table.deepcopy(version_0_1_15_0) 77 | version_0_1_16_0.version = '0.1.16.0' 78 | -- vshard.storage._call() is supposed to replace some internal functions exposed 79 | -- in _func; to allow introduction of new functions on replicas; to allow change 80 | -- of internal functions without touching the schema. 81 | version_0_1_16_0.funcs['vshard.storage._call'] = { 82 | {since_core = '1.10.0', def = {setuid = true}} 83 | } 84 | -- Don't drop old functions in the same version. Removal can happen only after 85 | -- 0.1.16, or support for rebalancing from too old versions would have to appear. 86 | -- Dropping these functions now would immediately make it impossible to rebalance 87 | -- from old instances. 88 | 89 | -------------------------------------------------------------------------------- 90 | return { 91 | version_0_1_15_0, 92 | version_0_1_16_0, 93 | } 94 | -------------------------------------------------------------------------------- /vshard/storage/reload_evolution.lua: -------------------------------------------------------------------------------- 1 | -- 2 | -- This module is used to upgrade the vshard.storage on the fly. 3 | -- It updates internal Lua structures in case they are changed 4 | -- in a commit. 5 | -- 6 | local log = require('log') 7 | local fiber = require('fiber') 8 | 9 | -- 10 | -- Array of upgrade functions.
11 | -- migrations[version] = function which upgrades module version 12 | -- from `version` to `version + 1`. 13 | -- 14 | local migrations = {} 15 | 16 | -- Initialize reload_upgrade mechanism 17 | migrations[#migrations + 1] = function(_) 18 | -- Code to update Lua objects. 19 | end 20 | 21 | migrations[#migrations + 1] = function(M) 22 | local bucket = box.space._bucket 23 | if bucket then 24 | assert(M.bucket_on_replace == nil) 25 | M.bucket_on_replace = bucket:on_replace()[1] 26 | end 27 | end 28 | 29 | migrations[#migrations + 1] = function(M) 30 | if not M.route_map then 31 | M.bucket_generation_cond = fiber.cond() 32 | M.route_map = {} 33 | end 34 | end 35 | 36 | -- 37 | -- Perform an update based on a version stored in `M` (internals). 38 | -- @param M Old module internals which should be updated. 39 | -- 40 | local function upgrade(M) 41 | local start_version = M.reload_version or 1 42 | if start_version > #migrations then 43 | local err_msg = string.format( 44 | 'vshard.storage.reload_evolution: ' .. 45 | 'auto-downgrade is not implemented; ' .. 46 | 'loaded version is %d, upgrade script version is %d', 47 | start_version, #migrations 48 | ) 49 | log.error(err_msg) 50 | error(err_msg) 51 | end 52 | for i = start_version + 1, #migrations do 53 | local ok, err = pcall(migrations[i], M) 54 | if ok then 55 | log.info('vshard.storage.reload_evolution: upgraded to %d version', 56 | i) 57 | else 58 | local err_msg = string.format( 59 | 'vshard.storage.reload_evolution: ' .. 60 | 'error during upgrade to %d version: %s', i, err 61 | ) 62 | log.error(err_msg) 63 | error(err_msg) 64 | end 65 | -- Update the version just after upgrade to have an 66 | -- actual version in case of an error. 67 | M.reload_version = i 68 | end 69 | end 70 | 71 | return { 72 | version = #migrations, 73 | upgrade = upgrade, 74 | } 75 | --------------------------------------------------------------------------------
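For illustration, a new migration step could be appended to the array above like this; the M.my_new_cache field is hypothetical and exists only to show the idempotent-guard pattern the real steps use:

migrations[#migrations + 1] = function(M)
    -- The guard makes the step a no-op when the loaded module already
    -- has the structure, mirroring the route_map step above.
    if not M.my_new_cache then
        M.my_new_cache = {}
    end
end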